Modeling the Joint Distribution of Wind Speed and Direction using Gaussian Mixture Models

OEN Method: Harris, Cook The parent wind speed distribution: Why Weibull? http://www.sciencedirect.com/science/article/pii/S0167610514001056

Gaussian Mixture Models, http://scikit-learn.org/stable/modules/mixture.html

1. Set up

1.1 Environment

In [1]:
# Notebook environment: inline figures, and auto-reload local modules on edit
# so helper changes are picked up without restarting the kernel.
%matplotlib inline
%load_ext autoreload
%autoreload 2

# Project-wide imports (wildcard by project convention) and library setup.
from import_file import *
from helpers.parallel_helper import *
load_libs()

# Plot defaults: round axis limits, no data margins, visible patch edges.
plt.rcParams['axes.autolimit_mode'] = 'round_numbers'
plt.rcParams['axes.xmargin'] = 0.
plt.rcParams['axes.ymargin'] = 0.
mpl.rcParams['patch.force_edgecolor'] = True

1.2 Read Data

In [2]:
# file_path= './data/NCDC/jerez/dat.txt' # time shift
# file_path= './data/NCDC/almeria/dat.txt'

# Greece
# file_path= './data/NCDC/eleftherios_intl/dat.txt'
# file_path= './data/NCDC/elefsis/dat.txt' # bad dataset
# file_path= './data/NCDC/malaga/dat.txt'
# file_path= './data/NCDC/gibraltar/dat.txt' # bad fit

# Turkey
# file_path= './data/NCDC/turkey/konya/dat.txt' 
# file_path= './data/NCDC/turkey/sivas/dat.txt' # bad dataset
# file_path= './data/NCDC/turkey/balikesir/dat.txt' # bad dataset
# file_path= './data/NCDC/turkey/bartin/dat.txt' # bad dataset

# Iran
# file_path= './data/NCDC/iran/chahbahar/dat.txt'
# file_path= './data/NCDC/iran/zabol/dat.txt' # Problematic data
# file_path= './data/NCDC/iran/torbat_heydarieh/dat.txt' # Unusable

# UAE
# file_path= './data/NCDC/abu_dhabi_intl/dat.txt' # Time shift
# file_path= './data/NCDC/bateen/dat.txt' # Time shift
# file_path= './data/NCDC/buraimi/dat.txt' # not good dataset

# file_path= './data/NCDC/uk/marham/dat.txt' 
# Active dataset; swap in any of the commented paths above to rerun elsewhere.
file_path= './data/NCDC/uk/tiree/dat.txt'  # try 4
# file_path= './data/NCDC/uk/boscombe_down/dat.txt' # 4?, numpy bug
# file_path= './data/NCDC/uk/middle_wallop/dat.txt' 
# file_path= './data/NCDC/uk/southhamption/dat.txt' # high 0, trend
# file_path= './data/NCDC/uk/bournemouth/dat.txt' # 4?
# file_path= "./data/NCDC/uk/weybourne/dat.txt"
# file_path= "./data/NCDC/uk/skye_lusa/dat.txt" # 
# file_path= "./data/NCDC/uk/wattisham/dat.txt"
# file_path= "./data/NCDC/uk/south_uist_range/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/uk/holbeach/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/uk/cambridge/dat.txt" # improper direction R square measure
# file_path= "./data/NCDC/us/baltimore/dat.txt" # time too short
# file_path= "./data/NCDC/uk/bealach_na_ba/dat.txt" # time too short
# file_path= "./data/NCDC/uk/benbecula/dat.txt" # truncate (untruncate in m/s), 4?

# file_path, NUMBER_OF_GAUSSIAN = "./data/NCDC/europe/landsberg_lech/dat.txt", 4 # very good, can try 4
# file_path= './data/NCDC/europe/pau_pyrenees/dat.txt' # unit shift, 2; force using knot 
# file_path= "./data/NCDC/europe/vatry/dat.txt"  # double peak, initial speed (should be good with m/s), mixed report type
# file_path= "./data/NCDC/europe/neuburg/dat.txt"
# file_path= "./data/NCDC/europe/valladolid/dat.txt"
# file_path= "./data/NCDC/europe/laupheim/dat.txt" # double peak, 4; very good, trend
# file_path= "./data/NCDC/europe/avord/dat.txt" # try 4, initial speed (should be good with m/s)
# file_path= './data/NCDC/europe/ciampino/dat.txt' # try 4, bandwidth?
# file_path= "./data/NCDC/europe/holzdorf/dat.txt" # 2008 year
# file_path= "./data/NCDC/europe/huspel_aws/dat.txt"  # integer, 4?
# file_path= "./data/NCDC/europe/barayas/dat.txt" # numpy problem
# file_path= './data/NCDC/europe/tenerife_sur/dat.txt'  # some directions are blocked
# file_path= './data/NCDC/europe/nantes/dat.txt' # some dir R square / K-S differs big, unit detect fails

# file_path= './data/NCDC/boston_16nm/dat.txt' # Offshore

# file_path= "./data/NCDC/cn/shanghai/hongqiao_intl/dat.txt" # care for the sampling time
# file_path= "./data/NCDC/cn/shanghai/pudong/dat.txt"
# file_path= "./data/NCDC/cn/nanjing_lukou/dat.txt" 
# file_path= "./data/NCDC/cn/zhengzhou_xinzheng/dat.txt" 
# file_path= "./data/NCDC/cn/tianjin/binhai/dat.txt" # few 0, trend, stationary speed, unstationary direction
# file_path= "./data/NCDC/cn/tianjin/tianjing/dat.txt" # 16 sectors
# file_path= "./data/NCDC/cn/hefei_luogang/dat.txt" # few 0, trend
# file_path= "./data/NCDC/cn/shijiazhuang_zhengding/dat.txt" 
# file_path= "./data/NCDC/cn/henan_gushi/dat.txt" # 16 sectors, fit not very good
# file_path= "./data/NCDC/cn/nanning_wuxu/dat.txt" # numpy problem, unstationary speed
# file_path= './data/NCDC/cn/macau/dat.txt'  
# file_path= "./data/NCDC/cn/hk_intl/dat.txt" # few 0

# file_path= './data/NCDC/southeast_asia/malaysia/mersing/dat.txt' # 2 mode, paper comparison
# file_path= './data/NCDC/southeast_asia/malaysia/penang/dat.txt'
# file_path= './data/NCDC/southeast_asia/malaysia/butterworth/dat.txt' # 2 mode 
# file_path= "./data/NCDC/southeast_asia/malaysia/bsultan_mahmud/dat.txt" # stable
# file_path= "./data/NCDC/southeast_asia/malaysia/bsultan_ismail/dat.txt" # 
# file_path= "./data/NCDC/southeast_asia/singapore/changi/dat.txt" # trend, no 0, questionary data
# file_path= "./data/NCDC/southeast_asia/singapore/paya_lebar/dat.txt" # questionary data
# file_path= "./data/NCDC/southeast_asia/singapore/seletar/dat.txt"
# file_path= "./data/NCDC/east_asia/cheongju_intl/dat.txt" # 2005-2009  may have problem, fit is good; numpy problem
# file_path= "./data/NCDC/east_asia/daegu_ab/dat.txt" # recent 5 year may have problem, but fit is generally good; numpy problem

# file_path= "./data/NCDC/oceania/auckland_intl/dat.txt"  # Good data, Weird KDE shape, might be blocked?
# file_path= "./data/NCDC/oceania/brisbane_archerfield/dat.txt" # high 0, few data 
# file_path= "./data/NCDC/oceania/narrandera/dat.txt" # high 0, few data
# file_path= "./data/NCDC/oceania/canberra/dat.txt" # high 0, numpy problem

# file_path = './data/asos/denver/hr_avg.csv'
# file_path = './data/asos/bismarck_ND/hr_avg.csv' # try 4
# file_path = './data/asos/aberdeen_SD/hr_avg.csv' # only to 2012, good fit, try 2
# file_path = './data/asos/minneapolis/hr_avg.csv'
# file_path = './data/asos/lincoln_NE/hr_avg.csv' 
# file_path = './data/asos/des_moines_IA/hr_avg.csv'
# file_path = './data/asos/springfield_IL/hr_avg.csv' # good fit
# file_path = './data/asos/topeka/hr_avg.csv' # High 0

# file_path = './data/NDAWN/baker/hr_avg.csv' # 4 might be better
# file_path = './data/NDAWN/dickinson/hr_avg.csv'
# file_path = './data/NDAWN/rugby/hr_avg.csv'
# file_path = './data/NDAWN/bowman/hr_avg.csv'
# file_path = './data/NDAWN/grand_forks/hr_avg.csv'
# file_path = './data/NDAWN/williston/hr_avg.csv'
# file_path = './data/NDAWN/jamestown/hr_avg.csv'
In [3]:
# Load the raw record and set per-source flags:
#   integer_data - speeds/directions are integer-coded (drives redistribution later)
#   knot_unit    - speeds in knots (deliberately left unset for NCDC so the
#                  unit-detection cell below can infer it)
# NOTE(review): the cn_database branch sets neither flag; later cells that read
# integer_data / knot_unit would raise NameError for that source — confirm
# whether read_cn_database is expected to define them.
if "cn_database" in file_path: 
    df = read_cn_database(file_path)
elif 'NCDC' in file_path:
    # HrMn is read as a string so leading zeros ("0000") survive parsing.
    df = pd.read_csv(file_path, header=0, skipinitialspace=True, dtype={'HrMn':'object'})
    df.rename(columns={'Date':'date','Dir':'dir','Spd':'speed','Type':'type','I.1':'wind_type'}, inplace=True)
    df = df[['date','HrMn','type','dir','speed','wind_type' ]]
    df.dropna(subset=['dir','speed'], inplace=True)
    integer_data = True
elif 'NDAWN' in file_path:
    # NDAWN files carry no report/wind type; fill with placeholders.
    df = pd.read_csv(file_path, header=0, skipinitialspace=True, dtype={'HrMn':'object'})
    df['type']='default'
    df['wind_type']='default'
    df = df.dropna()
    integer_data = False
    knot_unit = False
else:
    # ASOS
    df = pd.read_csv(file_path, header=0, skipinitialspace=True, dtype={'HrMn':'object'})
    df['type']='default'
    df['wind_type']='default'
    df = df.dropna()
    integer_data = False
    knot_unit = True
In [4]:
# Preview the assembled frame (full rich display; df.head() would shrink the output).
df
Out[4]:
date HrMn type dir speed wind_type
0 19790101 0000 FM-12 230 3.6 N
1 19790101 0100 FM-12 250 5.1 N
2 19790101 0200 FM-12 250 7.7 N
3 19790101 0300 FM-12 270 9.3 N
4 19790101 0400 FM-12 300 11.3 N
5 19790101 0500 FM-12 330 10.3 N
6 19790101 0600 FM-12 330 9.3 N
7 19790101 0700 FM-12 360 6.7 N
8 19790101 0800 FM-12 310 6.7 N
9 19790101 0900 FM-12 290 4.1 N
10 19790101 1000 FM-12 280 8.2 N
11 19790101 1100 FM-12 290 6.2 N
12 19790101 1200 FM-12 20 3.1 N
13 19790101 1300 FM-12 280 8.2 N
14 19790101 1400 FM-12 280 7.7 N
15 19790101 1500 FM-12 280 10.3 N
16 19790101 1600 FM-12 290 3.6 N
17 19790101 1700 FM-12 300 9.3 N
18 19790101 1800 FM-12 290 4.6 N
19 19790101 1900 FM-12 340 5.7 N
20 19790101 2000 FM-12 60 3.6 N
21 19790101 2100 FM-12 110 3.6 N
22 19790101 2200 FM-12 90 3.6 N
23 19790101 2300 FM-12 320 3.6 N
24 19790102 0000 FM-12 280 14.4 N
25 19790102 0100 FM-12 270 13.4 N
26 19790102 0200 FM-12 280 13.4 N
27 19790102 0300 FM-12 280 11.8 N
28 19790102 0400 FM-12 290 8.2 N
29 19790102 0500 FM-12 290 6.2 N
... ... ... ... ... ... ...
486093 20160801 1350 FM-15 170 3.1 V
486094 20160801 1400 FM-12 170 3.1 N
486095 20160801 1420 FM-15 150 2.1 V
486096 20160801 1450 FM-15 30 2.6 V
486097 20160801 1500 FM-12 30 2.6 N
486098 20160801 1520 FM-15 999 1.5 V
486099 20160801 1550 FM-15 130 2.1 N
486100 20160801 1600 FM-12 130 2.1 N
486101 20160801 1620 FM-15 100 2.1 V
486102 20160801 1650 FM-15 110 3.1 V
486103 20160801 1700 FM-12 110 3.1 N
486104 20160801 1720 FM-15 70 3.1 V
486105 20160801 1750 FM-15 50 4.6 V
486106 20160801 1800 FM-12 50 4.6 N
486107 20160801 1850 FM-15 60 5.1 N
486108 20160801 1900 FM-12 60 5.1 N
486109 20160801 1920 FM-15 60 5.1 N
486110 20160801 1950 FM-15 60 4.1 N
486111 20160801 2000 FM-12 60 4.1 N
486112 20160801 2020 FM-15 50 4.1 N
486113 20160801 2050 FM-15 50 3.6 N
486114 20160801 2100 FM-12 50 3.6 N
486115 20160801 2120 FM-15 50 3.6 N
486116 20160801 2150 FM-15 60 3.6 N
486117 20160801 2200 FM-12 60 3.6 N
486118 20160801 2220 FM-15 60 3.6 N
486119 20160801 2250 FM-15 60 3.1 N
486120 20160801 2300 FM-12 60 3.1 N
486121 20160801 2320 FM-15 70 4.1 N
486122 20160801 2350 FM-15 70 4.1 N

486123 rows × 6 columns

In [5]:
# Build a DatetimeIndex from the integer date (YYYYMMDD) and the zero-padded
# HrMn string (HHMM). The original `.astype(str).map(str)` double-converted
# to str; one astype(str) is sufficient.
df['time'] = pd.to_datetime(df["date"].astype(str) + df["HrMn"], format='%Y%m%d%H%M')
df.set_index(['time'], inplace=True)
df['HrMn'] = df['HrMn'].astype(int)
# Drop impossible records: direction codes above 999, speeds >= 100,
# and dates outside the 1970-2016 window of interest.
df = df.query("(dir <= 999) & (speed < 100) & \
              (date >= 19700000) & (date < 20170000) ")
In [6]:
# Quick look at the marginal speed and direction distributions.
plot_speed_and_angle_distribution(df.speed, df.dir)
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
  warnings.warn(self.msg_depr % (key, alt_key))
In [7]:
# Dir [10,360]=> [0,350]
# Vectorized with np.where instead of row-wise .apply — same results,
# much faster on hundreds of thousands of rows. 999 = variable/calm sentinel,
# preserved unchanged by both transforms.
df['dir'] = np.where(df['dir'] < 999, df['dir'] % 360, df['dir'])
# Month number extracted from the YYYYMMDD integer date.
df['month'] = df['date'] % 10000 // 100
# Convert Windrose coordinates to Polar Coordinates:
# keep the meteorological (windrose) direction, then switch the working
# column to math convention (0 = east, counter-clockwise positive).
df['dir_windrose'] = df['dir']
df['dir'] = np.where(df['dir'] < 999, (90 - df['dir']) % 360, df['dir'])
df.describe()
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:2: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  from ipykernel import kernelapp as app
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:3: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  app.launch_new_instance()
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:5: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:6: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
Out[7]:
date HrMn dir speed month dir_windrose
count 4.853830e+05 485383.000000 485383.000000 485383.000000 485383.000000 485383.000000
mean 2.000788e+07 1153.725957 205.073847 7.268022 6.484026 204.830806
std 1.142918e+05 670.111036 129.567515 3.840033 3.442359 134.306571
min 1.979010e+07 0.000000 0.000000 0.000000 1.000000 0.000000
25% 1.991013e+07 600.000000 130.000000 4.600000 4.000000 140.000000
50% 2.003032e+07 1120.000000 200.000000 6.700000 6.000000 200.000000
75% 2.011102e+07 1700.000000 270.000000 9.800000 9.000000 270.000000
max 2.016080e+07 2350.000000 999.000000 38.600000 12.000000 999.000000
In [8]:
df.plot(y='speed',legend=True,figsize=(20,5))
Out[8]:
<matplotlib.axes._subplots.AxesSubplot at 0x55e1828>

1.2.1 Unit Detection

In [9]:
# Infer the recording unit: knot-based data converted by the archive to m/s
# leaves many non-zero decimal parts; native-integer data clusters near .0.
df['decimal'] = df.speed % 1
df.decimal.hist(alpha=0.5, label='m/s', figsize=(4, 3))
if 'knot_unit' not in globals():
    # >30% of records with decimal part >= 0.2 => speeds were knot-based.
    # (The condition is already a bool; `True if ... else False` was redundant.)
    knot_unit = len(df.query('decimal >= 0.2')) / len(df) > 0.3

    if knot_unit:
        # Convert m/s back to knots and round to the original integer reports.
        df['speed'] = df['speed'] * 1.943845
        df['decimal'] = df.speed % 1
        df.decimal.hist(alpha=0.5, label='knot')
        # need more elaboration, some is not near an integer
        df['speed'] = df['speed'].apply(lambda x: int(round(x)))
    plt_configure(xlabel='Decimal', ylabel='Frequency', legend={'loc': 'best'}, title='Decimal Distribution')

# axis keyword spelled out; the bare positional form is deprecated in pandas.
df.drop(['decimal'], axis=1, inplace=True)
print(knot_unit)
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:1: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
  if __name__ == '__main__':
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:7: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:8: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:11: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame.
Try using .loc[row_indexer,col_indexer] = value instead

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:14: SettingWithCopyWarning: 
A value is trying to be set on a copy of a slice from a DataFrame

See the caveats in the documentation: http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy
True
In [10]:
# Axis-label unit suffixes used by all later plots.
dir_unit_text = ' (degree)'
# knot_unit is a plain bool — no comparison against True needed.
speed_unit_text = ' (knot)' if knot_unit else ' (m/s)'

1.2.2 Sampling Type Selection

In [11]:
# Keep only the report type (FM-12, FM-15, ...) that dominates the post-2000
# records, so sampling characteristics stay homogeneous.
sample_type = df.query('date > 20000000')['type']
sample_type.value_counts().plot(
    kind = 'bar', title = 'Report Type Composition', figsize=(4,3))

# idxmax() returns the *label* of the most frequent type; Series.argmax is
# deprecated (and positional in modern pandas), so it must not be used here.
report_type_most_used = sample_type.value_counts().idxmax()
df = df.query("type==@report_type_most_used")

1.2.3 Sampling Time Selection

In [12]:
# Midpoint year of the record, used throughout to split "early" vs "late" halves.
first_year = min(df.date) // 10000
last_year = max(df.date) // 10000
MID_YEAR = (first_year + last_year) // 2

# Compare sampling-time histograms for the full record vs the later half:
# a mismatch reveals changes in the station's reporting schedule.
overall_counts = df['HrMn'].value_counts().sort_index()
late_counts = df.query('date > @MID_YEAR * 10000')['HrMn'].value_counts().sort_index()
overall_counts.plot(kind='bar', alpha=0.5, label='Overall')
late_counts.plot(kind='bar', alpha=0.5, label='> %s' % MID_YEAR)

plt_configure(xlabel='Sampling Time', ylabel='Frequency', legend={'loc':'best'}, figsize=(8, 4), 
              title = 'Sampling Time Distribution, Overall and > %s ' %  MID_YEAR)
In [13]:
# Sampling minute within the hour; synoptic (FM-12) reports fall on minute 0.
df['sample_time'] = df.HrMn % 100 
sample_time = df.query('date > 20000000')['sample_time']
# Minutes with a meaningful number of post-2000 records (> 2000 observations).
sample_times = sample_time.value_counts()[sample_time.value_counts() > 2000]
sample_times = sample_times.index.tolist()
# df = df.query("sample_time in @sample_times")
# Keep only the most frequent sampling minute for a homogeneous time series.
df = df.query("sample_time == @sample_times[0]")
# axis keyword spelled out; the bare positional form is deprecated in pandas.
df.drop(['sample_time'], axis=1, inplace=True)
print(sample_times)

df['HrMn'].value_counts().sort_index().plot(kind='bar', alpha=0.5, figsize=(10, 4))
[0]
Out[13]:
<matplotlib.axes._subplots.AxesSubplot at 0xf0960b8>

1.3 Data Wrangling

1.3.1 Artefacts

1.3.1.1 wrong direction record

In [14]:
# For integer-coded datasets, directions should be multiples of 10 (or the
# 999 variable-direction sentinel); anything else is a recording artefact —
# display the offenders, then drop them.
if integer_data:
    display(df.query("(dir % 10 >= 0.1) & (dir != 999)"))
    df = df.query('(dir % 10 <= 0.1) | (dir == 999)')
date HrMn type dir speed wind_type month dir_windrose
time

1.3.1.2 sudden increase in speed

In [15]:
# sudden increase
# Speed change relative to the previous and the next record; NaN at the
# series edges is treated as no change (0).
df['incre'] = df.speed.diff(1)
df['incre'].fillna(0, inplace=True)
df['incre_reverse'] = df.speed.diff(-1)
df['incre_reverse'].fillna(0, inplace=True)

# Inspect the largest speeds alongside their jump sizes before filtering.
display(df.sort_values(by='speed',ascending=False).head(10))
df['incre'].plot(kind='hist', bins=arange(-15, 15), legend=True, figsize=(8, 3))
date HrMn type dir speed wind_type month dir_windrose incre incre_reverse
time
1991-01-03 06:00:00 19910103 600 FM-12 200 70 N 1 250 44.0 46.0
2004-04-21 11:00:00 20040421 1100 FM-12 290 66 N 4 160 33.0 32.0
1983-10-18 14:00:00 19831018 1400 FM-12 210 61 N 10 240 31.0 24.0
1979-05-09 01:00:00 19790509 100 FM-12 140 61 N 5 310 45.0 48.0
1989-02-13 15:00:00 19890213 1500 FM-12 160 56 N 2 290 6.0 6.0
1993-01-21 21:00:00 19930121 2100 FM-12 180 56 N 1 270 8.0 6.0
1993-01-17 05:00:00 19930117 500 FM-12 180 55 N 1 270 12.0 3.0
2011-12-08 14:00:00 20111208 1400 FM-12 180 55 N 12 270 3.0 6.0
1981-02-27 04:00:00 19810227 400 FM-12 320 55 N 2 130 19.0 22.0
1979-06-06 04:00:00 19790606 400 FM-12 250 55 N 6 200 49.0 48.0
Out[15]:
<matplotlib.axes._subplots.AxesSubplot at 0xe1bada0>
In [16]:
# A record is treated as a spurious spike when it exceeds BOTH neighbours by
# more than the threshold (20 knots or 10 m/s, depending on the unit).
incre_threshold = 20 if knot_unit else 10
print('sudden increase number', len(df.query('(incre > @incre_threshold )&(incre_reverse > @incre_threshold )')))
df = df.query('(incre < @incre_threshold )|(incre_reverse < @incre_threshold )')

# Check the max speed
display(df.sort_values(by='speed',ascending=False).head(10))
# axis keyword spelled out; the bare positional form is deprecated in pandas.
df.drop(['incre', 'incre_reverse'], axis=1, inplace=True)
sudden increase number 22
date HrMn type dir speed wind_type month dir_windrose incre incre_reverse
time
1993-01-21 21:00:00 19930121 2100 FM-12 180 56 N 1 270 8.0 6.0
1989-02-13 15:00:00 19890213 1500 FM-12 160 56 N 2 290 6.0 6.0
1993-01-17 05:00:00 19930117 500 FM-12 180 55 N 1 270 12.0 3.0
1981-02-27 04:00:00 19810227 400 FM-12 320 55 N 2 130 19.0 22.0
2011-12-08 14:00:00 20111208 1400 FM-12 180 55 N 12 270 3.0 6.0
1979-12-17 01:00:00 19791217 100 FM-12 210 54 N 12 240 1.0 2.0
1984-01-21 19:00:00 19840121 1900 FM-12 310 54 N 1 140 4.0 2.0
2011-12-08 12:00:00 20111208 1200 FM-12 190 54 N 12 260 2.0 2.0
2008-01-09 05:00:00 20080109 500 FM-12 170 53 N 1 280 13.0 15.0
1996-11-06 05:00:00 19961106 500 FM-12 210 53 N 11 240 34.0 5.0

1.3.2 0 Speed

In [17]:
# If calm (speed 0) records dominate the recent years, the station likely
# truncates low speeds; in that case drop zero-speed records entirely.
with_too_many_zero, null_wind_frequency = is_with_too_many_zero(df.query("(date >= 20050000)"))
delete_zero = with_too_many_zero
if delete_zero:
    df = df.query('(speed > 0)')
print(delete_zero, null_wind_frequency)
False 0.00653928680903

1.3.3 Direction re-aligment and 999

For some datasets, the 16 sectors are not recorded properly,

e.g. the sectors are [0,20,30,50], need to redistribute the angle into 22.5

In [18]:
display(df['dir'].value_counts().sort_index())
# Sectors actually in use: count directions excluding the 999 sentinel and
# keep those with more than 30 records. The filter mask is built from the
# SAME series being indexed — the original built it from the full column
# (which still contains 999), an unalignable boolean index in newer pandas.
dir_counts = df.query('dir < 999')['dir'].value_counts()
effective_column = dir_counts[dir_counts > 30].sort_index()
if integer_data:
    # Nominal sector width implied by the number of recorded sectors.
    SECTOR_LENGTH = 360/len(effective_column) 
else: 
    SECTOR_LENGTH = 10
print(len(effective_column), SECTOR_LENGTH)
0       2920
10      2831
20      4235
30      4516
40      4623
50      4010
60      5202
70      6821
80      8805
90      8527
100     8698
110     7263
120     7665
130     7077
140     8106
150     8517
160    10467
170    10216
180    11171
190    11178
200    12361
210    11430
220    12151
230    10247
240    10279
250     9925
260    12179
270    11267
280    12124
290    11058
300    12298
310    10270
320     8852
330     5863
340     4178
350     2837
999     4034
Name: dir, dtype: int64
36 10.0
In [19]:
# Speed distribution of variable-direction (999) reports — nearly all calm.
df.query('dir == 999')['speed'].value_counts()
Out[19]:
0    4031
2       2
3       1
Name: speed, dtype: int64
In [20]:
# Redistribute misaligned sectors onto the regular grid, then spread the
# 999 (variable-direction) records across sectors.
df=realign_direction(df, effective_column)
df=fill_direction_999(df, SECTOR_LENGTH)

1.4 Time Shift Comparison

In [21]:
# Direction bin edges: 'even' centres bins on the reported sectors (-5..355);
# 'round_up' uses plain [0, 360] edges instead.
DIR_REDISTRIBUTE = 'even'
if DIR_REDISTRIBUTE == 'even':
    DIR_BIN = arange(-5, 360, 10) 
elif DIR_REDISTRIBUTE == 'round_up':
    DIR_BIN = arange(0, 360+10, 10) 

# Comparison between mid_year, looking for: 
# 1. Odd Even Bias
# 2. Time Shift of Wind Speed Distribution
bins = arange(0, df.speed.max() + 1)
df.query('date < @MID_YEAR * 10000')['speed'].plot(
    kind='hist', alpha=0.5,bins=bins, label='< %s' % MID_YEAR)

df.query('date > @MID_YEAR * 10000')['speed'].plot(
    kind='hist', alpha=0.5,bins=bins, label='> %s' % MID_YEAR)

plt.suptitle('Speed Comparison between year < %s, > %s ' % (MID_YEAR, MID_YEAR), fontsize = 14)
plt_configure(xlabel='Speed', ylabel='Frequency', legend=True, figsize=(8, 3))
In [22]:
# Same early/late split as the speed comparison above, but for direction.
early_dirs = df.query('date < @MID_YEAR * 10000')['dir']
late_dirs = df.query('date > @MID_YEAR * 10000')['dir']
early_dirs.plot(kind='hist', alpha=0.5, bins=DIR_BIN, label='< %s' % MID_YEAR)
late_dirs.plot(kind='hist', alpha=0.5, bins=DIR_BIN, label='> %s' % MID_YEAR)

plt.suptitle('Dir Comparison between year < %s, and > %s ' % (MID_YEAR, MID_YEAR), fontsize = 14)
plt_configure(xlabel='Dir', ylabel='Frequency', legend={'loc':'best'}, figsize=(8, 3), tight='x')
In [23]:
# Inspect the time shift of speed and degree distribution, and odd-even bias
# (helper iterates over multi-year windows — see the printed ranges below).
check_time_shift(df, speed_unit_text=speed_unit_text, dir_unit_text=dir_unit_text)
1979 - 1979
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
  warnings.warn(self.msg_depr % (key, alt_key))
1980 - 1984
1985 - 1989
1990 - 1994
1995 - 1999
2000 - 2004
2005 - 2009
2010 - 2014
2015 - 2016
In [24]:
# Annual and monthly mean speed — look for long-term trends or coverage gaps.
df.resample('A').mean().plot(y='speed')
plt.gca().set_ylim(bottom=0)
df.resample('M').mean().plot(y='speed', figsize=(20,4))
plt.gca().set_ylim(bottom=0)
Out[24]:
(0, 25.0)
In [25]:
# Show, then drop, any records whose direction is still missing after realignment.
display(df[df['dir'].isnull()])
df.dropna(subset=['dir'], inplace=True)
date HrMn type dir speed wind_type month dir_windrose
time
In [26]:
# Per-year sanity check: overlay each year's speed / direction histogram
# (years with >= 5000 records) on the all-years distribution, with a shared
# y-limit so panels are directly comparable.
for column in ['speed', 'dir']:
    if column == 'speed':
        bins = arange(0, df[column].max()+1, 1)
    else:
        bins = arange(0, 361, 10)
    den, _ = np.histogram(df[column], bins=bins, density=True)
    y_top=max(den)*1.2  # common y-limit across all yearly panels
    for year in arange(1980, 2016):
        end_year = year
        sub_df = df[str(year):str(end_year)]
        if len(sub_df) > 5000:
            plt.figure()
            df[column].hist(bins=bins, alpha=0.3, normed=True)
            sub_df[column].hist(bins=bins, alpha=0.5, figsize=(3,1.5), normed=True)
            plt.gca().set_ylim(top=y_top)
            plt_configure(title=str(year))
    align_figures()
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\pyplot.py:524: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)
In [27]:
# Stationarity check: R-squared between each 3-year window's density and the
# all-years density, per column. Low or dropping R-squared flags distribution
# drift over the record.
for column in ['speed', 'dir']:
    if column == 'speed':
        bins = arange(0, df[column].max()+1, 1)
    else:
        bins = arange(0, 361, 10)
    density_all, _ = np.histogram(df[column], bins=bins, density=True)
    df[column].hist(bins=bins, figsize=(5,3))

    R_squares = []
    years = []
    for year in arange(1980, 2016):
        # Centered 3-year window around `year`, only if well-populated.
        start_year, end_year = year-1, year+1
        sub_df = df[str(start_year):str(end_year)]
        if len(sub_df) > 5000:
            density, _ = np.histogram(sub_df[column], bins=bins, density=True)
            y_mean = np.mean(density_all)
            SS_tot = np.sum(np.power(density_all - y_mean, 2))
            SS_res = np.sum(np.power(density_all - density, 2))

            R_square = 1 - SS_res / SS_tot
            R_squares.append(R_square)
            years.append(year)

    plt.figure()
    plot(years, R_squares)
    # Clamp the y-axis to [<=0.85, 1] so small fluctuations stay readable.
    ylim = max(min(plt.gca().get_ylim()[0],0.85),0)
    plt.gca().set_ylim(bottom=ylim, top=1)
    plt_configure(figsize=(5,3))
    align_figures()

1.5 Re-distribute Direction and Speed (Optional)

e.g. Dir 50 -> 45 ~ 55, to make the KDE result better

In [28]:
# Spread each integer-coded direction uniformly across its sector so the KDE
# does not see artificial spikes at sector centres.
if integer_data:
    df = randomize_angle(df, DIR_REDISTRIBUTE, SECTOR_LENGTH)
In [29]:
# Integer speeds are really bins; smear them back into a continuum.
# With zeros deleted, spread downward; otherwise spread upward (0 -> [0,1]).
if integer_data:
    redistribute_method = 'down' if delete_zero else 'up'
    df, speed_redistribution_info = randomize_speed(df, redistribute_method)
Redistribute upward, e.g. 0 -> [0,1]

1.6 Generate (x,y) from (speed,dir)

In [30]:
# Cook orientation
# df['dir']= (df['dir'] + 180)%360
In [31]:
# There might be a small dot in the centre, which is due to too many zero (more than 1 speed) in center
# Scatter plot in matplot has performance issue, the speed is very slow
# Cartesian components from polar (speed, dir); dir is already in math
# convention (0 = east, counter-clockwise), so plain cos/sin applies.
dir_rad = df['dir'] * pi / 180.0
df['x'] = df['speed'] * cos(dir_rad)
df['y'] = df['speed'] * sin(dir_rad)

2. Re-select Data and Overview

2.1 Data Overview

In [32]:
## Summary of the data selection
print('Knot unit?', knot_unit)
print('Report type used:', report_type_most_used)
print('Sampling time used:', sample_times)
if 'speed_redistribution_info' in globals():
    print('Speed redistribution info:', speed_redistribution_info )

df_all_years = df # for later across-year comparison
# Restrict the fit to 2010-2014 as the working subset.
df = df_all_years.query('(date >= 20100000) & (date < 20150000)')
# df = df.query('(HrMn == 0) and (speed >= 0.5) and (date%10000 > 900) and (date%10000 < 1000)' )
df.describe()
Knot unit? True
Report type used: FM-12
Sampling time used: [0]
Speed redistribution info: Redistribute upward, e.g. 0 -> [0,1]
Out[32]:
date HrMn dir speed month dir_windrose x y
count 4.283700e+04 42837.000000 42837.000000 42837.000000 42837.000000 42837.000000 42837.000000 42837.000000
mean 2.012072e+07 1148.978687 193.694646 14.401044 6.518150 191.330789 -2.259561 -2.884570
std 1.413668e+04 693.172819 90.977604 7.306553 3.459189 102.782601 10.601677 11.617160
min 2.010010e+07 0.000000 -4.997333 0.008143 1.000000 0.000000 -55.156019 -48.430597
25% 2.011040e+07 500.000000 122.849834 8.808094 4.000000 130.000000 -9.346615 -11.045167
50% 2.012070e+07 1100.000000 202.129235 13.515039 7.000000 200.000000 -2.170273 -3.088895
75% 2.013100e+07 1800.000000 270.693494 18.977568 10.000000 270.000000 5.051234 5.526688
max 2.014123e+07 2300.000000 354.970741 55.332336 12.000000 999.000000 36.708913 37.899357
In [33]:
# Speed time series for the selected window.
df.plot(y='speed',legend=True,figsize=(20,5))
Out[33]:
<matplotlib.axes._subplots.AxesSubplot at 0x24fc7d30>
In [34]:
# Monthly record counts — look for gaps in coverage within the window.
df.resample('M').count().plot(y='date', kind='bar',figsize=(20,4))
Out[34]:
<matplotlib.axes._subplots.AxesSubplot at 0x25553550>
In [35]:
# 90 degree is east
# Windrose in meteorological convention (dir_windrose column), 36 sectors,
# frequency-normalised, coloured by speed with the viridis map.
ax = WindroseAxes.from_ax()
viridis = plt.get_cmap('viridis')
ax.bar(df.dir_windrose, df.speed, normed=True, opening=0.8, edgecolor='white', nsector=36, cmap=viridis)
ax.set_legend()
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\cbook.py:136: MatplotlibDeprecationWarning: The axisbg attribute was deprecated in version 2.0. Use facecolor instead.
  warnings.warn(message, mplDeprecation, stacklevel=1)
In [36]:
# Guard against pathologically large subsets: down-sample to 500k records and
# visually check that the direction distribution is preserved.
if len(df) > 1000000:
    bins=arange(0,362)
    df['dir'].hist(bins=bins, normed=True,alpha=0.5,label='min')
    
    # Fixed random_state so the resample (and everything downstream of it)
    # is reproducible across kernel restarts.
    df = df_all_years.sample(n=500000, replace=True, random_state=42)
    df['dir'].hist(bins=bins, normed=True,alpha=0.5,label='min resampled')
    plt_configure(legend=True, figsize=(20,4))
In [37]:
# Fit a Weibull distribution to the speed magnitudes and compare it against
# the empirical distribution: histogram (PDF) and linearised CDF.
x, y_weibull, y_cdf_weibull, weibull_params, y_ecdf = fit_weibull_and_ecdf(df.speed)

# 1. Histogram comparison
fig = plt.figure()
df['speed'].hist(bins=arange(0, df.speed.max()), alpha=0.5, label='Data', normed=True)             
plot(x, y_weibull, '-', color='black',label='Weibull')   
plt_configure(figsize=(4,3),xlabel='V',ylabel='PDF', legend=True)

# 2. CDF comparison
# On ln(V) vs ln(-ln(1-P)) axes a Weibull CDF is a straight line.
# NOTE(review): x (or 1-P) can contain 0, producing the divide-by-zero log
# warnings seen below — harmless for the plot, but worth confirming upstream.
fig = plt.figure()
plot(log(x), log(-log(1-y_ecdf)),'o', label='ECDF')
plot(log(x), log(-log(1-y_cdf_weibull)),'-', label='Weibull')
plt_configure(xlabel="ln(V)", ylabel="ln(-ln(1-P)",legend={'loc':'best'}, figsize=(4,3))
align_figures()
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:11: RuntimeWarning: divide by zero encountered in log
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:12: RuntimeWarning: divide by zero encountered in log
In [38]:
# Joint (x, y) scatter; low alpha reveals density. Equal aspect ratio keeps
# directions undistorted.
df.plot(kind='scatter', x='x', y='y', alpha=0.05, s=2)
plt.gca().set_aspect('equal')
plt_configure(figsize=(3.2,3.2),xlabel='x'+speed_unit_text, ylabel='y'+speed_unit_text)

2.2 Overview by Direction

In [39]:
# Re-bin at the station's native sector width: 22.5 deg for 16-sector data,
# otherwise the standard 10 deg.
rebinned_angle = 22.5 if len(effective_column) == 16 else 10
In [40]:
# Per-sector speed histograms with shared x/y limits, so sector-to-sector
# differences in frequency and speed profile are directly comparable.
original_incre, incre = SECTOR_LENGTH, rebinned_angle
# Start half a native sector below 0 so bins stay centred on reported angles.
start, end = -original_incre/2 + incre/2, 360

max_speed = df.speed.max()
max_count = max_count_for_angles(df, start, end, incre)
plot_range = [0, max_speed, 0, max_count*1.05]

for angle in arange(start, end, incre):
    start_angle, end_angle = angle-incre/2, angle+incre/2
    sub_df, sub_max_speed = select_df_by_angle(df, start_angle, end_angle)   
    
    fig = plt.figure()
    sub_df['speed'].hist(bins=arange(0, max_speed), alpha=0.5, label='Data')
    title ='%s (%s - %s), %s' % (angle, start_angle, end_angle, len(sub_df)) 
    plt.axis(plot_range)
    plt_configure(figsize=(3,1.5), title=title)
align_figures()
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\pyplot.py:524: RuntimeWarning: More than 20 figures have been opened. Figures created through the pyplot interface (`matplotlib.pyplot.figure`) are retained until explicitly closed and may consume too much memory. (To control this warning, see the rcParam `figure.max_open_warning`).
  max_open_warning, RuntimeWarning)

2.3 Overview by Month

In [41]:
# Monthly windroses (speed >= 1 only) to expose seasonal direction patterns;
# month_incre > 1 would group several months per rose.
month_incre = 1
current_df = df.query('speed>=1')
for month in arange(1, 12+month_incre, month_incre): 
    end_month = month+month_incre
    sub_df = current_df.query('(month >= @month) and (month < @end_month)')
    if len(sub_df) > 0:
        if month_incre == 1:
            title = 'Month: %s' % (month)
        else:
            title = 'Month: %s - %s ' % (month, end_month-1)
        ax = WindroseAxes.from_ax()
        ax.bar(sub_df.dir_windrose, sub_df.speed, normed=True, opening=0.8, edgecolor='white', nsector=36, cmap=plt.get_cmap('viridis'))
        plt_configure(figsize=(3,3), title=title)
align_figures()
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\cbook.py:136: MatplotlibDeprecationWarning: The axisbg attribute was deprecated in version 2.0. Use facecolor instead.
  warnings.warn(message, mplDeprecation, stacklevel=1)

3. Create input data and configuration

In [42]:
# Model inputs: (x, y) sample array, number of mixture components, and the
# criterion used for fitting.
SPEED_SET = array(list(zip(df.x, df.y)))
if 'NUMBER_OF_GAUSSIAN' not in globals():
    NUMBER_OF_GAUSSIAN = 3
FIT_METHOD = 'square_error'
# Fallback KDE bandwidth if the grid-search cell is skipped. NOTE(review):
# the name is misspelled ("BANDWDITH") but kept as-is — later cells reference
# this exact spelling.
DEFAULT_BANDWDITH = 1.5 if knot_unit else 0.7
fig_list = []
In [43]:
# Fit on a square lattice covering the 95th speed percentile in each component.
fit_limit = ceil(df['speed'].quantile(.95))
fitting_axis_range = arange(-fit_limit, fit_limit+1, 1)
print(fitting_axis_range)

# (x, y) lattice points fed to the KDE / GMM fitting routines; a list
# comprehension replaces the manual nested-append loop (identical output).
FITTING_RANGE = [[i, j] for i in fitting_axis_range for j in fitting_axis_range]
[-28 -27 -26 -25 -24 -23 -22 -21 -20 -19 -18 -17 -16 -15 -14 -13 -12 -11
 -10  -9  -8  -7  -6  -5  -4  -3  -2  -1   0   1   2   3   4   5   6   7
   8   9  10  11  12  13  14  15  16  17  18  19  20  21  22  23  24  25
  26  27  28]
In [44]:
# Plot axes span the same 95th-percentile limit as the fitting lattice.
plot_limit = ceil(df['speed'].quantile(.95))
PLOT_AXIS_RANGE = arange(-plot_limit, plot_limit+1, 1)

4. Kernel Density Estimation

In [45]:
# KDE input and kernel choice; bandwidth is grid-searched in the next cell.
sample = SPEED_SET
KDE_KERNEL = 'gaussian'
# KDE_KERNEL, bandwidth = 'tophat', 1
In [46]:
%%time
# Cross-validated bandwidth selection for the 2-D KDE.
# NOTE(review): sklearn.grid_search was removed in scikit-learn 0.20; the
# model_selection import below was tried and found too slow — revisit when
# upgrading scikit-learn.
from sklearn.grid_search import GridSearchCV
# from sklearn.model_selection import GridSearchCV  ## too slow

# The bandwidth value sometimes would be too radical
if knot_unit:
    bandwidth_range = arange(0.7,2,0.2)
else:
    bandwidth_range = arange(0.4,1,0.1)

# Grid search is unable to deal with too many data (a long time is needed)
# NOTE(review): this resample has no random_state, so the selected bandwidth
# is not reproducible for large datasets — consider seeding it.
if len(sample) > 50000:    
    df_resample=df.sample(n=50000, replace=True)
    bandwidth_search_sample = array(list(zip(df_resample.x, df_resample.y)))
else:
    bandwidth_search_sample = sample

# 4-fold CV over the candidate bandwidths, parallelised across all cores.
grid = GridSearchCV(neighbors.KernelDensity(kernel = KDE_KERNEL),
                    {'bandwidth': bandwidth_range}, n_jobs=-1, cv=4) 

grid.fit(bandwidth_search_sample)
bandwidth = grid.best_params_['bandwidth']
print(bandwidth)
D:\ProgramData\Anaconda3\lib\site-packages\sklearn\cross_validation.py:44: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. Also note that the interface of the new CV iterators are different from that of this module. This module will be removed in 0.20.
  "This module will be removed in 0.20.", DeprecationWarning)
D:\ProgramData\Anaconda3\lib\site-packages\sklearn\grid_search.py:43: DeprecationWarning: This module was deprecated in version 0.18 in favor of the model_selection module into which all the refactored classes and functions are moved. This module will be removed in 0.20.
  DeprecationWarning)
1.9
Wall time: 2min
In [47]:
# Fall back to the default bandwidth if the grid-search cell was skipped.
if 'bandwidth' not in globals():
    bandwidth = DEFAULT_BANDWDITH

# Fit the KDE on the full sample and evaluate it on the fitting grid.
kde = neighbors.KernelDensity(bandwidth=bandwidth, kernel = KDE_KERNEL).fit(sample)

points = FITTING_RANGE
# very slow if the dataset is too large, e.g. 100,000
# kde returns log prob, need to convert it
kde_result = exp(kde.score_samples(points))
print('bandwidth:', bandwidth, len(kde_result))
print(kde_result[:5])
bandwidth: 1.9 3249
[  3.54707622e-06   4.06802110e-06   4.82677117e-06   5.94912339e-06
   7.39006735e-06]
In [48]:
# Plot jPDF: KDE joint density as a 3-D surface, a 2-D heatmap, and the raw
# 2-D histogram of the data for visual comparison.
X = Y = PLOT_AXIS_RANGE
# Can't work if pass as generate_Z_from_X_Y(X,Y, exp(kde.score_samples())), need to use lambda
# see http://stackoverflow.com/questions/21035437/passing-a-function-as-an-argument-in-python
kde_Z = generate_Z_from_X_Y(X,Y, lambda coords: exp(kde.score_samples(coords)))
# Shared colour limits so later GMM heatmaps are directly comparable to KDE.
colorbar_lim = 0, kde_Z.max()

plot_3d_prob_density(X,Y,kde_Z)

fig_kde,ax1 = plt.subplots(figsize=(3.5,2.5))
plot_2d_prob_density(X,Y,kde_Z,xlabel='x'+speed_unit_text, ylabel='y'+speed_unit_text, ax=ax1)

# Raw data histogram with the grid disabled so the cells read cleanly.
with sns.axes_style({'axes.grid' : False}):
    from matplotlib import ticker
    fig_hist,ax2 = plt.subplots(figsize=(3.5,2.5))
    _,_,_,image = ax2.hist2d(df.x, df.y, bins=PLOT_AXIS_RANGE, cmap='viridis',)
    ax2.set_aspect('equal')
    cb = plt.colorbar(image)
    # Limit colourbar ticks to keep the small figure legible.
    tick_locator = ticker.MaxNLocator(nbins=6)
    cb.locator = tick_locator
    cb.update_ticks()
    plt_configure(ax=ax2, xlabel='x'+speed_unit_text,ylabel='y'+speed_unit_text)
align_figures()
In [49]:
kde_cdf = cdf_from_pdf(kde_result)

5. GMM by Expectation-maximization

In [50]:
# Fit a full-covariance Gaussian mixture by Expectation-Maximization as the
# baseline (and as the starting point for the later optimization refit).
sample= SPEED_SET
clf = mixture.GaussianMixture(n_components=NUMBER_OF_GAUSSIAN, covariance_type='full')
clf.fit(sample)
# Confirm EM converged before trusting the parameters below.
print(clf.converged_)
True
In [51]:
# Convert the sklearn mixture into the project's flat parameter format
# (weight, mean_x, mean_y, sig_x, sig_y, corr per component) and display it.
gmm_em_result = read_gmm_em_result(clf)
pretty_print_gmm(gmm_em_result)
Out[51]:
weight mean_x mean_y sig_x sig_y corr
1 0.363 -10.773 -3.289 8.385 9.414 -0.160
2 0.320 0.464 6.959 7.218 8.076 0.121
3 0.316 4.759 -12.387 9.044 8.378 -0.138
In [52]:
# Visualize each EM component as a covariance ellipse in velocity space.
fig,ax = plt.subplots(figsize=(3.5,3.5))
plot_gmm_ellipses(gmm_em_result, ax=ax, xlabel='x'+speed_unit_text, ylabel='y'+speed_unit_text)
GMM Plot Result
0.363287458223 [[-10.77289751  -3.28903359]] [ 7.99226851  9.74901044] -152.997847827
0.320344535505 [[ 0.46359941  6.95853713]] [ 7.00178882  8.26328307] 156.474572317
0.316368006272 [[  4.75897168 -12.38694987]] [ 8.00195946  9.37837198] -120.502169707
In [53]:
# Compare the EM-fitted GMM density with the KDE reference over the plot grid.
X = Y = PLOT_AXIS_RANGE
pdf_Z = generate_Z_from_X_Y(X, Y, lambda coords: exp(clf.score_samples(coords)))

def residual_between_kde_and_em_gmm(points):
    """Pointwise KDE density minus EM-GMM density (score_samples is log-pdf)."""
    return exp(kde.score_samples(points)) - exp(clf.score_samples(points))

residual_Z = generate_Z_from_X_Y(X, Y, residual_between_kde_and_em_gmm)

# 3-D surfaces: fitted density, then its residual against the KDE.
plot_3d_prob_density(X, Y, pdf_Z)
plot_3d_prob_density(X, Y, residual_Z)
align_figures()

# 2-D heatmaps; KDE and GMM share colorbar_lim so they are directly comparable.
fig = plt.figure(figsize=(3.5,2.5))
plot_2d_prob_density(X, Y, kde_Z, xlabel='x'+speed_unit_text,
                     ylabel='y'+speed_unit_text, colorbar_lim=colorbar_lim)
fig_em = plt.figure(figsize=(3.5,2.5))
plot_2d_prob_density(X, Y, pdf_Z, xlabel='x'+speed_unit_text,
                     ylabel='y'+speed_unit_text, colorbar_lim=colorbar_lim)
fig = plt.figure(figsize=(3.5,2.5))
plot_2d_prob_density(X, Y, residual_Z, xlabel='x'+speed_unit_text,
                     ylabel='y'+speed_unit_text)
align_figures()

Goodness-of-fit Statistics

In [54]:
# Goodness-of-fit of the EM mixture against the KDE reference on the grid.
points = FITTING_RANGE
gmm_pdf_result = exp(clf.score_samples(points))
gof_df(gmm_pdf_result, kde_result)
Out[54]:
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.974 0.013 0.022 2.775239e-09 0.041 0.176

6. GMM by Optimization

In [55]:
# Inputs for the optimization-based refit; max_speed is reused by the
# speed-distribution cells further down.
sample = SPEED_SET
points = FITTING_RANGE
max_speed = df.speed.max()
print(FIT_METHOD)
square_error
In [56]:
# Refine the EM solution by directly minimizing the fit score against the
# KDE reference (warm start from gmm_em_result).
# Parameter vector layout per component: weight, mean_x, mean_y, sig_x, sig_y, rho.
x0 = gmm_em_result

cons = [
        # Weights are every 6th element (starting at index 0); they must sum to 1.
        {'type': 'eq', 'fun': lambda x: sum(x[::6]) - 1},
        # Optional: bound each ellipse's width/height ratio to [1/3, 3].
#         {'type': 'ineq', 'fun': lambda x: width_height_ratios_set(x) - 1/3},
#         {'type': 'ineq', 'fun': lambda x: 3 - width_height_ratios_set(x)},
]

# Per-component box bounds (weight, means, sigmas, correlation), repeated for
# each Gaussian in x0.  (Name kept as 'bonds' for backward compatibility.)
bonds = [(0., 0.99),(-fit_limit, fit_limit),(-fit_limit, fit_limit),
         (0., fit_limit),(0., fit_limit),(-0.99, 0.99)]*(len(x0)//6)

result = sp.optimize.minimize(
    # Distinct lambda argument name: the original shadowed x0, which made the
    # objective confusing to read (the outer x0 is the initial guess).
    lambda params: GMM_fit_score(params, kde_result, points, FIT_METHOD),
    x0,
    bounds = bonds,
    constraints=cons,
    tol = 1e-12,
    options = {"maxiter": 500})
result
Out[56]:
     fun: -19.982982445546732
     jac: array([  2.01938009e+00,   0.00000000e+00,   0.00000000e+00,
        -2.38418579e-07,  -2.38418579e-07,  -2.38418579e-07,
         2.01937032e+00,   0.00000000e+00,   0.00000000e+00,
         0.00000000e+00,   2.38418579e-07,  -4.76837158e-07,
         2.01936769e+00,   4.76837158e-07,   2.38418579e-07,
         0.00000000e+00,   0.00000000e+00,   2.38418579e-07,
         0.00000000e+00])
 message: 'Optimization terminated successfully.'
    nfev: 1492
     nit: 74
    njev: 74
  status: 0
 success: True
       x: array([  0.11653526,  10.30402936, -12.90355993,   6.9770382 ,
         6.27260499,  -0.33508017,   0.20837502,   1.99080946,
         8.92009669,   6.75093584,   6.86271249,  -0.10449288,
         0.67508972,  -6.27144055,  -4.49638659,   9.88907743,
        10.23846161,  -0.21933133])

6.1 GMM Result

In [57]:
# Regroup the flat optimizer vector into per-component parameters (sorted by
# weight), build the callable mixture PDF, and evaluate it on the grid.
gmm = group_gmm_param_from_gmm_param_array(result.x, sort_group = True)
mixed_model_pdf = generate_gmm_pdf_from_grouped_gmm_param(gmm)
gmm_pdf_result = mixed_model_pdf(points)
pretty_print_gmm(gmm)
Out[57]:
weight mean_x mean_y sig_x sig_y corr
1 0.675 -6.271 -4.496 9.889 10.238 -0.219
2 0.208 1.991 8.920 6.751 6.863 -0.104
3 0.117 10.304 -12.904 6.977 6.273 -0.335
In [58]:
# Covariance ellipses of the optimization-refined mixture.
fig_gmm, ax = plt.subplots(figsize=(3.5,3.5))
plot_gmm_ellipses(gmm, ax=ax, xlabel='x'+speed_unit_text, ylabel='y'+speed_unit_text)
GMM Plot Result
0.675089720503 [[-6.27144055 -4.49638659]] [  8.8784161   11.12626065] -139.498575824
0.20837501835 [[ 1.99080946  8.92009669]] [ 6.43703568  7.15796962] -139.465846116
0.116535261146 [[ 10.30402936 -12.90355993]] [ 5.3500605   7.70723608] -126.173586788

6.2 Goodness-of-fit statistics

In [59]:
gof_df(gmm_pdf_result, kde_result)
Out[59]:
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.981 0.015 0.054 2.096530e-09 0.036 0.153
In [60]:
# Same residual visualization as for the EM fit, now for the optimized mixture.
X = Y = PLOT_AXIS_RANGE
# mixed_model_pdf is already a plain callable, so it is passed directly.
pdf_Z = generate_Z_from_X_Y(X, Y, mixed_model_pdf)

def residual_between_kde_and_optimized_gmm(points):
    """Pointwise KDE density minus optimized-GMM density."""
    return exp(kde.score_samples(points)) - mixed_model_pdf(points)

residual_Z = generate_Z_from_X_Y(X, Y, residual_between_kde_and_optimized_gmm)

# 3-D surfaces: fitted density, then its residual against the KDE.
plot_3d_prob_density(X, Y, pdf_Z)
plot_3d_prob_density(X, Y, residual_Z)
align_figures()

# 2-D heatmaps of KDE, optimized GMM, and their residual.
fig = plt.figure(figsize=(3.5,2.5))
plot_2d_prob_density(X, Y, kde_Z, xlabel='x'+speed_unit_text,
                     ylabel='y'+speed_unit_text)
fig_gmm = plt.figure(figsize=(3.5,2.5))
plot_2d_prob_density(X, Y, pdf_Z, xlabel='x'+speed_unit_text,
                     ylabel='y'+speed_unit_text)
fig = plt.figure(figsize=(3.5,2.5))
plot_2d_prob_density(X, Y, residual_Z, xlabel='x'+speed_unit_text,
                     ylabel='y'+speed_unit_text)
align_figures()
In [61]:
def f(V, theta):
    """Joint density of (speed V, direction theta): the Cartesian mixture pdf
    evaluated at (V cos(theta), V sin(theta)) times the polar Jacobian V."""
    return V * mixed_model_pdf([[V * cos(theta), V * sin(theta)]])
In [62]:
# Empirical, Weibull, and GMM CDFs on Weibull axes (ln V vs ln(-ln(1-P))).
x, y_weibull, y_cdf_weibull, weibull_params, y_ecdf = fit_weibull_and_ecdf(df.speed)

# 3. GMM distribution: CDF at each speed by integrating f over the disc of
# radius x_val (nquad returns (value, error); keep the value).
y_ = [integrate.nquad(f, [[0, x_val],[0, 2*pi]]) for x_val in x]
y_cdf_gmm = array(list(zip(*y_))[0])

# NOTE(review): log(0) at x=0 / P=0 or P=1 triggers the divide-by-zero
# warnings seen in the output; the points are simply dropped from the plot.
plot(log(x), log(-log(1-y_ecdf)),'o', label = 'Empirical')
plot(log(x), log(-log(1-y_cdf_weibull)),'--', label = 'Weibull')
plot(log(x), log(-log(1-y_cdf_gmm)),'-', color='black', label = 'GMM')
plt_configure(xlabel='ln(V)',ylabel='ln(-ln(1-P))',legend={'loc':'best'})
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:7: RuntimeWarning: divide by zero encountered in log
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:8: RuntimeWarning: divide by zero encountered in log
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:9: RuntimeWarning: divide by zero encountered in log
In [63]:
# Calculate Speed Distribution
# 1. GMM Model: approximate the marginal speed pdf at each x_ by integrating
# f over a narrow speed band [x_-0.01, x_+0.01] and all directions, then
# dividing by the band width (0.02) and scaling to counts.
x = arange(0, max_speed, 0.5)
y_ =[integrate.nquad(f, [[x_-0.01, x_+0.01],[0, 2*pi]]) for x_ in x]
y_gmm = array(list(zip(*y_))[0])*len(df.speed)/0.02

# 2. Weibull
y_weibul = sp.stats.weibull_min.pdf(x, *weibull_params)

df['speed'].hist(bins=arange(0, df.speed.max()), alpha=0.5, label='Data')
plot(x, y_gmm,'-', color='black', label='GMM')
plot(x, y_weibul*len(df.speed), '--', color='black', label='Weibull') 
print('Speed Distribution Comparison')
plt_configure(xlabel='Speed'+speed_unit_text,
              ylabel='Frequency',legend=True, figsize=(4, 2))
plt.gca().set_ylim(bottom = 0)
plt.tight_layout()
plt.locator_params(axis='y', nbins=5)
Speed Distribution Comparison
In [64]:
# Calculate Angle Distribution: marginal direction density from the GMM by
# integrating f over all speeds within a 10-degree (pi/18) wedge per point.
x = linspace(0,2*pi, num=36+1)
y_ =[integrate.nquad(f, [[0, inf],[x_-pi/36, x_+pi/36]]) for x_ in x]
y = array(list(zip(*y_))[0])*len(df['dir']) 

df['dir'].hist(bins=DIR_BIN, alpha=0.5, label='Data')
# Convert radians to degrees for the x-axis.
plot(x/pi*180, y,'-', color='black', label='GMM')
# `title` is only printed below (the figure itself carries no title).
title='Direction Distribution Comparison'
plt_configure(xlabel='Direction'+dir_unit_text, ylabel='Frequency', 
              legend={'loc': 'best'} ,tight='xtight',figsize = (4,2))
plt.tight_layout()
# Keep a handle to this figure; it is re-displayed later for comparison.
dir_fig = plt.gcf()
print(title)
Direction Distribution Comparison
In [65]:
# %%time
# Per-sector comparison of the GMM against data, run in parallel over
# direction sectors of width `incre` degrees.
incre = max(SECTOR_LENGTH, 10)
density_collection=Parallel(n_jobs=-1)(delayed(direction_compare)(gmm, df, angle, incre) 
                                        for angle in arange(0, 360, incre))  
# This R square is computed as in the paper:
# "Comparison of bivariate distribution construction approaches for
#  analysing wind speed and direction data"
# http://onlinelibrary.wiley.com/doi/10.1002/we.400/full
print(true_R_square(density_collection))
0.90002720359

6.3 Sectoral Comparison

In [66]:
# Calculate Speed Distribution
def model_data_comparison(df, original_incre = 10, incre = 10):
    """Per-sector comparison of observed speeds against GMM and Weibull fits.

    For each direction sector of width `incre` degrees, plots the speed
    histogram with fitted PDFs and CDFs, and accumulates goodness-of-fit
    statistics.  Relies on module globals: `f` (joint GMM density) and the
    project plotting/fitting helpers.

    Parameters:
        df: observation DataFrame (must provide `speed` and the columns used
            by select_df_by_angle).
        original_incre: raw direction-binning width in degrees; the first
            rebinned sector is offset so sectors stay centred on it.
        incre: rebinned sector width in degrees.

    Returns:
        (max_diff_array, curve_collection) where max_diff_array holds one row
        per sector: [data_size, angle, max K-S diff (GMM), speed at that diff,
        max K-S diff (Weibull), speed at that diff, R^2 (GMM), R^2 (Weibull)];
        curve_collection holds per-sector dicts of all fitted/empirical curves.
    """
    start, end = -original_incre/2 + incre/2, 360
    max_diff_array = []
    curve_collection = []
    max_speed = df.speed.max()
    
    # Find a max count for plotting histogram (shared y-limit across sectors)
    max_count = max_count_for_angles(df, start, end, incre)
    plot_range = [0, max_speed, 0, max_count*1.05]
    
    for angle in arange(start, end, incre):
        angle_radian, incre_radian = radians(angle), radians(incre)  
        start_angle, end_angle = angle-incre/2, angle+incre/2
        
        # Select data from observation
        sub_df, sub_max_speed = select_df_by_angle(df, start_angle, end_angle)
        data_size = len(sub_df.speed)
        # 1. Get Weibull and ECDF
        x, y_weibull, y_cdf_weibull, weibull_params, y_ecdf = fit_weibull_and_ecdf(sub_df.speed)
        # 2. Get GMM PDF, CDF (integrated over this sector's angular wedge)
        _, y_gmm, y_cdf_gmm, direction_prob = gmm_integration_in_direction(f, angle_radian-incre_radian/2, angle_radian+incre_radian/2, x)
        
        # 3. Make Plots
        fig = plt.figure(figsize=(10,1.9))
#         fig = plt.figure(figsize=(10,1.7))
        # 3.1. Frequency Comparison
        ax1 = fig.add_subplot(1,3,1)        
        sub_df['speed'].hist(bins=arange(0, sub_max_speed), alpha=0.5, label='Data')                  
        plot(x, y_gmm*data_size,'-', color='black', label='GMM')
        plot(x, y_weibull*data_size, '--', color='black',label='Weibull')   
#         plt_configure(xlabel = "$V$", ylabel='Frequency', legend=True)
        plt_configure(xlabel = "V", ylabel='Frequency', legend=True)
        plt.axis(plot_range)
        
        # 3.2. CDF Comparison
        ax2 = fig.add_subplot(1,3,2)
        plot(x, y_ecdf,'o', alpha=0.8, label='Data')
        plot(x, y_cdf_gmm,'-', color='black',label='GMM')
        plot(x, y_cdf_weibull,'--', color='black',label='Weibull')
        plt.gca().set_xlim(right = max_speed)
#         plt_configure(xlabel = "$V$", ylabel='$P$', legend=True)
        plt_configure(xlabel = "V", ylabel='P', legend=True)
        
        # 3.3. Weibull Comparison (disabled)
#         ax3 = fig.add_subplot(1,3,3)
#         plot(log(x), log(-log(1-y_ecdf)),'o', alpha=0.8, label='Data')
#         plot(log(x), log(-log(1-y_cdf_gmm)),'-', color='black', label='GMM')
#         plot(log(x), log(-log(1-y_cdf_weibull)),'--',color='black',label='Weibull')
#         plt.gca().set_xlim(right = log(max_speed+1))
# #         plt_configure(xlabel="ln($V$)", ylabel="ln(-ln(1-$P$)",legend={'loc':'best'})
#         plt_configure(xlabel="ln(V)", ylabel="ln(-ln(1-P)",legend={'loc':'best'})
        
        # Expected per-bin GMM probability, normalized by the sector weight so
        # it is conditional on the direction (comparable to `density`).
        bins = arange(0, sub_df.speed.max()+1)
        density, _ = np.histogram(sub_df['speed'], bins=bins, normed=True)
        density_expected_ =[integrate.nquad(f, [[x_, x_+1],[angle_radian-incre_radian/2, angle_radian+incre_radian/2]]) 
                            for x_ in bins[:-1]]
        density_expected_gmm = array(list(zip(*density_expected_ ))[0])/direction_prob
        R_square_gmm = sector_r_square(density, density_expected_gmm)
        
        density_expected_weibull = sp.stats.weibull_min.cdf(bins[1:], *weibull_params) - sp.stats.weibull_min.cdf(bins[:-1], *weibull_params) 
        R_square_weibull = sector_r_square(density, density_expected_weibull)

        # K-S style statistic: largest CDF gap between data and each model.
        diff, diff_weibull= np.abs(y_ecdf - y_cdf_gmm), np.abs(y_ecdf - y_cdf_weibull)
        max_diff_array.append([len(sub_df), angle, diff.max(), x[diff.argmax()], 
                               diff_weibull.max(), x[diff_weibull.argmax()], R_square_gmm, R_square_weibull])
        curves = {'angle': angle, 'data_size': data_size, 'weight': direction_prob, 
                  'x': x, 'gmm_pdf': y_gmm, 'gmm_cdf': y_cdf_gmm,
                  'weibull_pdf': y_weibull, 'weibull_cdf': y_cdf_weibull, 'ecdf': y_ecdf}
        curve_collection.append(curves)
        
        plt.tight_layout()
        plt.show()
        print('%s (%s - %s) degree' % (angle, start_angle, end_angle))
        print('data size:', len(sub_df), 'weight', len(sub_df)/len(df))
        print('GMM', 'Weibull')
        print('R square', R_square_gmm,  R_square_weibull)
        print('max diff:', diff.max(), diff_weibull.max(), 'speed value:', x[diff.argmax()], x[diff_weibull.argmax()], 'y gmm', y_cdf_gmm[diff.argmax()])
        print(' ')
    return max_diff_array, curve_collection
In [67]:
%%time
# Pick the rebinned sector width to divide 360 evenly given the station's
# reporting resolution: 16-point compass data gets 22.5-degree sectors.
if len(effective_column) == 16:
    rebinned_angle = 22.5
else: 
    rebinned_angle = 20
    
max_diff_array, curve_collection = model_data_comparison(df, SECTOR_LENGTH, rebinned_angle)
5.0 (-5.0 - 15.0) degree
data size: 689 weight 0.01608422625300558
GMM Weibull
R square 0.754426820038 0.831608151092
max diff: 0.0901067828948 0.0915948110547 speed value: 8.03248362426 4.01624181213 y gmm 0.41497304294
 
25.0 (15.0 - 35.0) degree
data size: 1348 weight 0.03146812335130845
GMM Weibull
R square 0.75363033492 0.846683913236
max diff: 0.141078863028 0.0583786230949 speed value: 9.53232276954 4.76616138477 y gmm 0.482066537566
 
45.0 (35.0 - 55.0) degree
data size: 1339 weight 0.031258024604897634
GMM Weibull
R square 0.789643553727 0.878481820715
max diff: 0.0986794463708 0.0528811351021 speed value: 9.92837449001 3.97134979601 y gmm 0.430820180216
 
65.0 (55.0 - 75.0) degree
data size: 2180 weight 0.05089058524173028
GMM Weibull
R square 0.930740236859 0.973725540168
max diff: 0.0786292061942 0.0114119810429 speed value: 17.6615695668 15.6991729482 y gmm 0.829087921791
 
85.0 (75.0 - 95.0) degree
data size: 2183 weight 0.05096061815720055
GMM Weibull
R square 0.961325345456 0.960337427566
max diff: 0.0265786236229 0.0383071234819 speed value: 6.13516948216 16.3604519524 y gmm 0.135602902139
 
105.0 (95.0 - 115.0) degree
data size: 1983 weight 0.046291757125849146
GMM Weibull
R square 0.935688142746 0.944230841525
max diff: 0.0419501990284 0.0502560542189 speed value: 5.81148566086 13.5601332087 y gmm 0.126670320057
 
125.0 (115.0 - 135.0) degree
data size: 2007 weight 0.046852020449611315
GMM Weibull
R square 0.941833922165 0.94740853892
max diff: 0.0396316006733 0.0356002030638 speed value: 14.9741648616 4.99138828722 y gmm 0.642481005206
 
145.0 (135.0 - 155.0) degree
data size: 2353 weight 0.054929150033849246
GMM Weibull
R square 0.921550472453 0.937614722862
max diff: 0.0659916513689 0.0338973970191 speed value: 15.9433881543 15.9433881543 y gmm 0.614841327807
 
165.0 (155.0 - 175.0) degree
data size: 2785 weight 0.06501388986156827
GMM Weibull
R square 0.962929423929 0.966465002817
max diff: 0.0253093491758 0.0117503550028 speed value: 13.6384478367 13.6384478367 y gmm 0.45081273341
 
185.0 (175.0 - 195.0) degree
data size: 3167 weight 0.07393141443144945
GMM Weibull
R square 0.95804433163 0.962758615822
max diff: 0.0365080153691 0.0234552853567 speed value: 20.3855975003 14.5611410716 y gmm 0.762671018417
 
205.0 (195.0 - 215.0) degree
data size: 3267 weight 0.07626584494712516
GMM Weibull
R square 0.979059211617 0.978529444298
max diff: 0.0207680055881 0.0116953443215 speed value: 23.8885394226 23.8885394226 y gmm 0.888322903503
 
225.0 (215.0 - 235.0) degree
data size: 3333 weight 0.0778065690874711
GMM Weibull
R square 0.96450315775 0.966395710531
max diff: 0.0191026792697 0.0105737319655 speed value: 9.02211537273 13.5331730591 y gmm 0.218221053104
 
245.0 (235.0 - 255.0) degree
data size: 2665 weight 0.06221257324275743
GMM Weibull
R square 0.914932762234 0.94074480005
max diff: 0.0533064373952 0.0177874252811 speed value: 9.9377950926 4.9688975463 y gmm 0.265267671423
 
265.0 (255.0 - 275.0) degree
data size: 3307 weight 0.07719961715339543
GMM Weibull
R square 0.939800787043 0.964578257304
max diff: 0.0652078589905 0.0168074761705 speed value: 17.8748034932 12.7677167808 y gmm 0.679359658204
 
285.0 (275.0 - 295.0) degree
data size: 3132 weight 0.07311436375096295
GMM Weibull
R square 0.913361436173 0.955857320852
max diff: 0.0428530814631 0.0215317225827 speed value: 25.0817711319 12.5408855659 y gmm 0.906199186188
 
305.0 (295.0 - 315.0) degree
data size: 3596 weight 0.08394612134369821
GMM Weibull
R square 0.97089993716 0.979917003758
max diff: 0.0234043491863 0.0180719748993 speed value: 10.1301276699 12.6626595873 y gmm 0.204716918708
 
325.0 (315.0 - 335.0) degree
data size: 2358 weight 0.05504587155963303
GMM Weibull
R square 0.924894968128 0.948124777129
max diff: 0.0537748033828 0.0240410549659 speed value: 11.2829118227 24.82240601 y gmm 0.295080995071
 
345.0 (335.0 - 355.0) degree
data size: 950 weight 0.02217708989891916
GMM Weibull
R square 0.908039886645 0.910774971923
max diff: 0.0382987419402 0.0325397817988 speed value: 6.91393707589 6.91393707589 y gmm 0.256438100165
 
Wall time: 1min 6s
In [68]:
# Tabulate the per-sector statistics (column order mirrors the rows built in
# model_data_comparison) and plot sectoral R^2 for GMM vs Weibull.
diff_df = pd.DataFrame(max_diff_array,columns=['datasize','direction', 'gmm', 'speed_gmm',
                                               'weibull', 'speed_weibull', 'r_square_gmm', 'r_square_weibull'])  

gmm_mean, weibull_mean = plot_sectoral_comparison(diff_df.r_square_gmm, diff_df.r_square_weibull, diff_df.direction, diff_df.datasize)
plt_configure(ylabel="$\ R^2$", xlabel='Direction'+dir_unit_text)
# Keep the y-range anchored at [<=0.75, 1] so plots are comparable across runs.
ylim = min(plt.gca().get_ylim()[0],0.75)
plt.gca().set_ylim(top=1, bottom=ylim)
plt.tight_layout()
print(gmm_mean, weibull_mean)
0.9311400429475094 0.9512616666292201
In [69]:
# Sectoral K-S statistic (max CDF gap) for GMM vs Weibull.
gmm_mean, weibull_mean = plot_sectoral_comparison(diff_df.gmm, diff_df.weibull, diff_df.direction, diff_df.datasize)
plt_configure(ylabel="K-S", xlabel='Direction'+dir_unit_text)
# Keep the y-range anchored at [0, >=0.25] so plots are comparable across runs.
ylim = max(plt.gca().get_ylim()[1],0.25)
plt.gca().set_ylim(top=ylim, bottom=0)
plt.tight_layout()
print(gmm_mean, weibull_mean)
0.04663761411285676 0.025220480074541262
In [70]:
# Compare direction weight with previous figure (re-display the direction
# distribution figure saved earlier as dir_fig).
display(dir_fig)

6.4 Insufficient-fit Sector Investigation

6.4.1 Data Variability, by Bootstrap (Resampling)

In [71]:
# Pick the sector with the largest GMM K-S difference (row index 2) for the
# follow-up variability investigations.
max_diff_element = max(max_diff_array, key=lambda x: x[2])
angle =  max_diff_angle = max_diff_element[1]
incre = rebinned_angle
In [72]:
# Bootstrap resample fraction (1 = resample the full sector size each draw).
FRACTION = 1

# Select data from observation for the worst-fitting sector.
start_angle, end_angle = angle-incre/2, angle+incre/2
angle_radian, incre_radian = radians(angle), radians(incre)  
sub_df, sub_max_speed = select_df_by_angle(df, start_angle, end_angle)
In [73]:
# Bootstrap the worst sector's data to see whether the GMM misfit exceeds
# sampling variability.
x = arange(0, sub_max_speed, 0.5)
_, y_weibull, y_cdf_weibull, weibull_params, y_ecdf = fit_weibull_and_ecdf(sub_df.speed, x)
_, y_gmm, y_cdf_gmm, direction_prob = gmm_integration_in_direction(f, angle_radian-incre_radian/2, angle_radian+incre_radian/2, x)

fig = plt.figure(figsize=(10,1.9))
ax1 = fig.add_subplot(1,3,1)   
ax2 = fig.add_subplot(1,3,2)   
ax3 = fig.add_subplot(1,3,3)   

# 1. Data
bins=arange(0, sub_max_speed)
sub_df['speed'].hist(ax=ax1, bins=bins, alpha=0.5, label='Data', normed=True)  

# 2. GMM
ax1.plot(x, y_gmm,'-', color='black', label='GMM')
ax2.plot(x, y_cdf_gmm,'-', color = 'black', label='GMM')
ax3.plot(log(x), log(-log(1-y_cdf_gmm)),'-', color = 'black',label='GMM')

# 3. Weibull
ax1.plot(x, y_weibull,'--',color='black',label='Weibull')
ax2.plot(x, y_cdf_weibull,'--',label='Weibull')
ax3.plot(log(x), log(-log(1-y_cdf_weibull)),'--',label='Weibull')

# 4. Data Resampled: 99 bootstrap draws of the sector
count_collection = []
for i in range(1,100):
    sub_df_resampled = sub_df.sample(frac=FRACTION, replace=True)    
    resampled_count, _ = np.histogram(sub_df_resampled['speed'], bins=bins, normed=True) 
    count_collection.append(resampled_count)
    
    ecdf = sm.distributions.ECDF(sub_df_resampled.speed)
    y_ecdf = ecdf(x) 
    ax2.plot(x, y_ecdf,':', label='Data Resampled')
    ax3.plot(log(x), log(-log(1-y_ecdf)),':', label='Data Resampled')
    # Configure axes only once so the legend lists each series a single time.
    if i == 1: 
#         plt_configure(ax=ax2, xlabel = "$V$", ylabel='$P$', legend={'loc':'best'})
#         plt_configure(ax=ax3, xlabel="ln($V$)", ylabel="ln(-ln(1-$P$)",legend={'loc':'best'})
        plt_configure(ax=ax2, xlabel = "V", ylabel='P', legend={'loc':'best'})
        plt_configure(ax=ax3, xlabel="ln(V)", ylabel="ln(-ln(1-P)",legend={'loc':'best'})

print('%s (%s - %s) Degree Speed Distribution' % (angle, start_angle, end_angle))
# Envelope of the bootstrap histograms (per-bin min/max across draws).
count_collection = np.array(count_collection)
mx, mn = np.max(count_collection,0), np.min(count_collection,0)
ax1.plot(bins[1:]-0.5, mx, ':', color='blue')
ax1.plot(bins[1:]-0.5, mn, ':', color='blue', label='Resample limit')
ax1.set_ylim(bottom = 0)
# plt_configure(ax=ax1, xlabel='$V$',ylabel='Frequency',legend={'loc':'best'})
plt_configure(ax=ax1, xlabel='V', ylabel='Frequency',legend={'loc':'best'})
ax1.locator_params(axis='y', nbins=5)
ax2.locator_params(axis='y', nbins=5)
ax3.locator_params(axis='y', nbins=5)
plt.tight_layout()
# NOTE(review): y_ecdf here is the ECDF of the LAST bootstrap draw (loop
# variable leaks), so this diff is against a resample, not the original data.
diff = abs(y_ecdf - y_cdf_gmm)
print(diff.max(), x[diff.argmax()], y_cdf_gmm[diff.argmax()])
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:17: RuntimeWarning: divide by zero encountered in log
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:22: RuntimeWarning: divide by zero encountered in log
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:34: RuntimeWarning: divide by zero encountered in log
25.0 (15.0 - 35.0) Degree Speed Distribution
0.112273164729 9.0 0.442622977704

6.4.2 Time Variability

In [74]:
# Compare the worst sector's speed distribution across 5-year windows to see
# whether the misfit is stable over time.
fig_time_variability_3d = plt.figure()
ax1 = fig_time_variability_3d.gca(projection='3d')

fig_time_variability_cdf,ax2 = plt.subplots(figsize=(3,1.8))
fig_time_variability_weibull, ax3 = plt.subplots(figsize=(3,1.8))

ax2.plot(x, y_cdf_gmm,'-', color='black', label = 'GMM')
ax2.plot(x, y_cdf_weibull,'--', label='Weibull')

ax3.plot(log(x), log(-log(1-y_cdf_gmm)),'-', color='black',label='GMM')
ax3.plot(log(x), log(-log(1-y_cdf_weibull)), '--', label='Weibull')

# 3. Data — dates are integer-encoded YYYYMMDD, so 20000000..20150000 in
# steps of 50000 walks 5-year windows from 2000 to 2015; time_label is the
# window's starting year.
# NOTE(review): axes.color_cycle is deprecated (warning below); the modern
# equivalent is axes.prop_cycle.
prop_cycle=iter(mpl.rcParams['axes.color_cycle'])
for start_time in range(20000000, 20150000, 50000):
    end_time = start_time + 50000 
    time_label = start_time//10000
    df_other_years = df_all_years.query('(date >= @start_time) & (date < @end_time)')
    df_other_years_at_angle, sub_max_speed_other_year = select_df_by_angle(df_other_years, start_angle, end_angle)
    if len(df_other_years_at_angle) > 0 :
        
        ecdf = sm.distributions.ECDF(df_other_years_at_angle.speed)
        y_ecdf = ecdf(x)
        ax2.plot(x, y_ecdf,':', label = time_label)
        ax3.plot(log(x), log(-log(1-y_ecdf)),':', label = time_label)
        
        title = '%s - %s' %(time_label, time_label+4)
        count, division = np.histogram(df_other_years_at_angle['speed'], normed=True,
                                       bins=arange(0, sub_max_speed_other_year))
        ax1.bar(left=division[:-1], height=count, zs=time_label, zdir='x', 
                color=next(prop_cycle), alpha=0.8)
        # Overlay the fitted curves at this window's position on the time axis;
        # label only one window so the legend stays clean.
        x_3d = time_label*np.ones_like(x)
        ax1.plot(x_3d, x, y_gmm, '-', color='black', label='GMM'  if time_label == 2010 else '')
        ax1.plot(x_3d, x, y_weibull, '--', color='blue', label='Weibull' if time_label == 2010 else '')
        
print('%s (%s - %s) Degree Speed Distribution' % (angle, start_angle, end_angle))
ax1.set_ylim(bottom = 0)
ax1.set_zlabel('Frequency')
plt_configure(ax=ax1, xlabel='Time',ylabel='V', legend=True)
# plt_configure(ax=ax2, xlabel = "$V$", ylabel='$P$', legend={'loc':'best'})
# plt_configure(ax=ax3, xlabel="ln($V$)", ylabel="ln(-ln(1-$P$)", legend={'loc':'best'})
plt_configure(ax=ax2, xlabel = "V", ylabel='P', legend={'loc':'best'})
plt_configure(ax=ax3, xlabel="ln(V)", ylabel="ln(-ln(1-P)", legend={'loc':'best'})

ax1.set_zlim(bottom = 0)
align_figures()
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:10: RuntimeWarning: divide by zero encountered in log
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:11: RuntimeWarning: divide by zero encountered in log
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
  warnings.warn(self.msg_depr % (key, alt_key))
D:\ProgramData\Anaconda3\lib\site-packages\ipykernel\__main__.py:25: RuntimeWarning: divide by zero encountered in log
25.0 (15.0 - 35.0) Degree Speed Distribution

6.4.3 Adjacent Sector Variability

In [75]:
# The worst sector and its two neighbours, one sector width apart each side.
incre = rebinned_angle
angle_group = [max_diff_angle-incre, max_diff_angle, max_diff_angle+incre]
In [76]:
# Compare the worst sector against its adjacent sectors using the curves
# already collected by model_data_comparison.
fig_adjecent_variability_3d = plt.figure()
ax1 = fig_adjecent_variability_3d.gca(projection='3d')
fig_adjecent_variability_cdf, ax2 = plt.subplots(figsize=(3,1.8))
fig_adjecent_variability_weibull, ax3 = plt.subplots(figsize=(3,1.8))

legend_3d = False
# NOTE(review): axes.color_cycle is deprecated (warning below); the modern
# equivalent is axes.prop_cycle.
prop_cycle=iter(mpl.rcParams['axes.color_cycle'])

curve_df = pd.DataFrame(curve_collection)

for angle in angle_group:
    # Wrap the angle into [0, 360) and pull this sector's stored curves
    # (the query yields a single row; take its dict of columns).
    curves = curve_df.query('angle == @angle%360').T.to_dict()
    curves = curves[list(curves)[0]]
    data_size, x =  curves['data_size'], curves['x']
    y_gmm, y_cdf_gmm =  curves['gmm_pdf'], curves['gmm_cdf'] 
    y_weibull, y_cdf_weibull, y_cdf = curves['weibull_pdf'],  curves['weibull_cdf'], curves['ecdf']

    # Emphasize the worst-fitting sector with a solid line.
    linestyle = '-' if angle == max_diff_angle else ':'
    alpha = 0.7 if angle == max_diff_angle else 0.3

    ax2.plot(x, y_gmm*data_size, linestyle, label=angle)        
    ax3.plot(x, y_weibull*data_size, linestyle, label=angle)

    start_angle, end_angle = angle-incre/2, angle+incre/2
    sub_df, sub_max_speed = select_df_by_angle(df, start_angle, end_angle)

    x_3d = angle*np.ones_like(x)
    ax1.plot(x_3d, x, y_gmm*data_size, color='black', label='GMM')
    ax1.plot(x_3d, x, y_weibull*data_size, color='blue', linestyle='--',label='Weibull')

    count, division = np.histogram(sub_df['speed'], bins=arange(0, sub_max_speed))
    ax1.bar(left=division[:-1], height=count, zs=angle, zdir='x', color=next(prop_cycle), alpha=0.8)

    # Add the 3-D legend only once (first iteration).
    if legend_3d == False:
        ax1.legend()
        legend_3d = True
        
plt_configure(ax=ax1, xlabel='Direction', ylabel='Speed')   
plt_configure(ax=ax2, xlabel='V',ylabel='Frequency',legend={'loc':'best'})
plt_configure(ax=ax3, xlabel='V',ylabel='Frequency',legend={'loc':'best'})
ax1.set_zlabel('Frequency')
ax1.set_zlim(bottom = 0)
# Share a y-limit between the two 2-D panels for comparability.
ylim = max(ax1.get_ylim()[1],ax3.get_ylim()[1])
ax2.set_ylim(bottom = 0, top=ylim)
ax3.set_ylim(bottom = 0, top=ylim)

print(max_diff_angle) 
print('GMM, Weibull, Histogram')
align_figures()
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
  warnings.warn(self.msg_depr % (key, alt_key))
25.0
GMM, Weibull, Histogram

7. Result Variability & Cross-Validation

In [77]:
# Fill in defaults for any setting the earlier cells did not define, then
# bundle them into the config dict passed to the resampled fitting runs.
if 'bandwidth' not in globals():
    bandwidth = DEFAULT_BANDWDITH    
if 'FIT_METHOD' not in globals():
    FIT_METHOD = 'square_error'       
if 'KDE_KERNEL' not in globals():
    KDE_KERNEL = 'gaussian'
    
config = {'bandwidth': bandwidth, 
          'fitting_range': FITTING_RANGE,
          'fit_limit': fit_limit,
          'kde_kernel': KDE_KERNEL}

print(bandwidth, FIT_METHOD)
1.9 square_error

7.1 Variability of the Result

In [78]:
%%time
# Refit the whole pipeline on 10 bootstrap resamples of df (in parallel) to
# gauge the variability of the fitted mixture parameters.
results = Parallel(n_jobs=-1)(delayed(resampled_fitting)(df, FIT_METHOD, NUMBER_OF_GAUSSIAN, config) for i in range(10))                        
for result in results:
    display(pretty_print_gmm(result['gmm']))
    fig,ax = plt.subplots(figsize=(3.5,3.5))
    plot_gmm_ellipses(result['gmm'],ax=ax, xlabel='x'+speed_unit_text, ylabel='y'+speed_unit_text)
    plt.show()
    
    # GOF against the resample's own KDE, then against the original KDE.
    display(gof_df(result['gmm_pdf_result'], result['kde_result']))
    display(gof_df(result['gmm_pdf_result'], kde_result))
    print('')
weight mean_x mean_y sig_x sig_y corr
1 0.620 -4.087 -7.601 12.176 10.006 -0.335
2 0.317 -0.492 7.398 8.146 7.258 0.106
3 0.063 5.019 -9.431 10.385 5.299 -0.806
GMM Plot Result
0.61966351101 [[-4.08669609 -7.60050255]] [  8.76171279  13.10018839] -119.750459517
0.317216220672 [[-0.49231477  7.39773138]] [ 7.08793599  8.29420819] -68.7239303126
0.0631202683186 [[ 5.01896088 -9.43072101]] [  2.88332238  11.29654736] -114.021663432
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.007 0.026 2.233045e-09 0.036 0.157
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.007 0.024 2.148164e-09 0.036 0.154

weight mean_x mean_y sig_x sig_y corr
1 0.552 -5.760 2.939 9.892 10.409 0.369
2 0.400 2.384 -10.917 10.831 7.912 -0.261
3 0.047 4.410 5.951 3.481 4.875 -0.505
GMM Plot Result
0.552383622019 [[-5.76004118  2.93887832]] [  8.04527051  11.8944033 ] 138.932559572
0.400228157439 [[  2.3838007  -10.91730136]] [  7.39209939  11.19214222] -109.608170377
0.0473882205424 [[ 4.41012484  5.95118469]] [ 2.75329582  5.32013692] -152.107823837
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.983 0.008 0.027 1.877528e-09 0.033 0.144
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.982 0.008 0.027 1.975439e-09 0.034 0.148

weight mean_x mean_y sig_x sig_y corr
1 0.728 -2.836 -7.250 12.220 9.611 -0.364
2 0.245 -1.601 9.010 7.591 7.154 0.335
3 0.027 5.787 4.001 2.699 3.181 -0.247
GMM Plot Result
0.727720730728 [[-2.83563881 -7.2497048 ]] [  8.33653103  13.1225818 ] -118.157828893
0.244851800834 [[-1.60078979  9.00956884]] [ 5.99518924  8.53586069] -50.0351208033
0.0274274684374 [[ 5.78675191  4.00121538]] [ 2.47965353  3.3549954 ] -151.841700341
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.984 0.008 0.028 1.763023e-09 0.032 0.140
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.983 0.006 0.026 1.818284e-09 0.033 0.142

weight mean_x mean_y sig_x sig_y corr
1 0.677 -6.526 -4.008 9.659 10.233 -0.180
2 0.190 2.359 9.159 6.620 6.709 -0.149
3 0.133 10.018 -12.915 6.948 6.424 -0.356
GMM Plot Result
0.677050231934 [[-6.52566647 -4.00818054]] [  8.96181813  10.84832331] -143.893828182
0.189700238524 [[ 2.3589288   9.15884231]] [ 6.14589244  7.14591736] -137.55394473
0.133249529542 [[ 10.01810235 -12.91510521]] [ 5.33690965  7.8144357 ] -128.791065771
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.977 0.018 0.061 2.501298e-09 0.038 0.167
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.017 0.072 2.146350e-09 0.036 0.154

weight mean_x mean_y sig_x sig_y corr
1 0.661 -6.037 -5.154 10.015 10.035 -0.285
2 0.238 1.730 8.520 7.020 7.045 -0.089
3 0.101 10.719 -12.956 6.699 5.903 -0.323
GMM Plot Result
0.660532682603 [[-6.03733495 -5.15420026]] [  8.47429222  11.36642326] -135.201316066
0.238081388187 [[ 1.7297588   8.51999635]] [ 6.70991077  7.34039789] -136.128330415
0.10138592921 [[ 10.71931303 -12.95641793]] [ 5.11242173  7.32027972] -124.277275077
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.016 0.046 2.195477e-09 0.035 0.156
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.017 0.043 2.151743e-09 0.036 0.155

weight mean_x mean_y sig_x sig_y corr
1 0.499 -5.574 3.712 9.917 10.138 0.372
2 0.455 1.377 -10.371 10.888 8.180 -0.266
3 0.046 4.211 6.305 3.871 4.847 -0.516
GMM Plot Result
0.498589902565 [[-5.57425703  3.71237516]] [  7.94422317  11.74787264] 136.695492514
0.454968748891 [[  1.37700096 -10.37079595]] [  7.59344084  11.30467723] -111.297068925
0.0464413485442 [[ 4.21093683  6.30539942]] [ 2.94264998  5.46089337] -146.846649551
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.984 0.008 0.023 1.738861e-09 0.032 0.139
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.982 0.008 0.024 1.922639e-09 0.034 0.146

weight mean_x mean_y sig_x sig_y corr
1 0.695 -5.967 -4.674 10.142 10.279 -0.225
2 0.210 1.806 8.943 6.717 7.111 -0.088
3 0.095 10.827 -12.783 6.682 5.531 -0.344
GMM Plot Result
0.694553305543 [[-5.96738543 -4.67446897]] [  8.98786572  11.30160658] -136.709649987
0.210118971003 [[ 1.8063274   8.94286427]] [ 6.54465367  7.26982134] -151.462717772
0.095327723454 [[ 10.82670166 -12.7826667 ]] [ 4.80661886  7.22075036] -120.509398138
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.982 0.015 0.042 1.923272e-09 0.034 0.146
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.018 0.048 2.137298e-09 0.036 0.154

weight mean_x mean_y sig_x sig_y corr
1 0.673 -6.103 -4.769 10.070 10.349 -0.253
2 0.218 1.705 8.627 6.999 6.809 -0.073
3 0.109 10.714 -12.907 6.810 6.156 -0.339
GMM Plot Result
0.673460022704 [[-6.10342947 -4.76899544]] [  8.81419968  11.43787716] -138.077199517
0.21750447602 [[ 1.70527025  8.62684671]] [ 6.62862314  7.16961539] -124.739006167
0.109035501277 [[ 10.71362731 -12.90718625]] [ 5.22671258  7.5463512 ] -126.691632306
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.014 0.040 2.163629e-09 0.036 0.155
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.014 0.046 2.145500e-09 0.036 0.154

weight mean_x mean_y sig_x sig_y corr
1 0.578 -3.953 -8.341 12.405 9.809 -0.354
2 0.358 -0.958 6.922 8.381 7.529 0.119
3 0.065 3.784 -8.906 10.602 4.842 -0.797
GMM Plot Result
0.577560912567 [[-3.95324108 -8.34056074]] [  8.55438236  13.30133801] -118.120244927
0.357738383493 [[-0.95819799  6.92204312]] [ 7.30360119  8.57820187] -66.0130390133
0.0647007039401 [[ 3.78398941 -8.90591411]] [  2.73902374  11.32864146] -111.297770991
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.982 0.007 0.024 2.009243e-09 0.035 0.149
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.007 0.023 2.153765e-09 0.036 0.155

weight mean_x mean_y sig_x sig_y corr
1 0.616 -3.754 -7.797 12.370 9.811 -0.345
2 0.329 -0.975 7.245 8.187 7.344 0.162
3 0.055 3.652 -8.783 10.170 4.843 -0.818
GMM Plot Result
0.615865532596 [[-3.75420881 -7.79660362]] [  8.60596741  13.2370698 ] -117.930760567
0.329446649479 [[-0.97475377  7.24512438]] [ 6.98089472  8.49791195] -61.9434754057
0.0546878179255 [[ 3.65208099 -8.78291353]] [  2.58723859  10.96275278] -112.597985476
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.981 0.008 0.024 2.094559e-09 0.035 0.152
R_square K_S Chi_square MSE RMSE / Max RMSE / Mean
0 0.980 0.010 0.023 2.216760e-09 0.037 0.157
Wall time: 56.6 s

7.2 Cross-Validation to Select the Number of Gaussians

In [79]:
# df = df_all_years.query('(date >= 20100000) & (date < 20150000)')
# df = df.query('(HrMn%400 == 0)')
In [80]:
%%time
from sklearn.cross_validation import train_test_split, KFold

## 5-fold cross validation
gaussian_number_range = arange(1,6)
CV_result_train_all,CV_result_test_all =[],[]
number_of_fold = 4
print('Number of train/test dataset', len(df)*(number_of_fold-1)/number_of_fold, len(df)/number_of_fold) 

for number_of_gaussian in gaussian_number_range:
    print( '  ')
    print('Number of gaussian', number_of_gaussian)
    
    kf = KFold(len(df), n_folds=number_of_fold, shuffle=True) 

    CV_result = Parallel(n_jobs=-1)(delayed(fit_per_fold)(df, train_index, test_index, FIT_METHOD, number_of_gaussian, config) for train_index, test_index in kf)                        

    CV_result_train, CV_result_test = list(zip(*CV_result))
    CV_result_train, CV_result_test = list(CV_result_train), list(CV_result_test)
        
    CV_result_train_all.append(CV_result_train)
    CV_result_test_all.append(CV_result_test)
    
    print('Train')
    pretty_pd_display(CV_result_train)
    print('Test')
    pretty_pd_display(CV_result_test)
Number of train/test dataset 32127.75 10709.25
  
Number of gaussian 1
Train
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.076523 0.020021 8.213212e-09 0.069342 0.302042 0.924879
1 0.077415 0.022353 8.425073e-09 0.071585 0.305931 0.921877
2 0.078687 0.020471 8.588433e-09 0.071546 0.308729 0.921028
3 0.075878 0.021048 8.379244e-09 0.071404 0.305114 0.922329
Test
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.085534 0.028265 9.692416e-09 0.079137 0.328028 0.908761
1 0.079227 0.021540 8.711833e-09 0.068168 0.310936 0.921291
2 0.077159 0.023322 7.877135e-09 0.068366 0.296108 0.926974
3 0.085493 0.021143 8.941714e-09 0.071286 0.314963 0.919110
  
Number of gaussian 2
Train
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.035756 0.011234 2.923398e-09 0.042420 0.180206 0.973028
1 0.033180 0.010635 2.787828e-09 0.040747 0.176061 0.974300
2 0.031337 0.011052 2.707997e-09 0.040187 0.173409 0.974983
3 0.031488 0.011388 2.722906e-09 0.040213 0.173797 0.974951
Test
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.034197 0.009379 2.842409e-09 0.039664 0.177621 0.973934
1 0.034507 0.009263 3.141283e-09 0.043859 0.186463 0.971092
2 0.040903 0.017058 3.504590e-09 0.045982 0.197335 0.967982
3 0.047151 0.016256 3.503837e-09 0.046610 0.197617 0.967600
  
Number of gaussian 3
Train
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.045614 0.015887 2.213491e-09 0.036333 0.156778 0.979809
1 0.059770 0.015909 2.041880e-09 0.034444 0.150608 0.981112
2 0.052569 0.014643 2.147282e-09 0.036159 0.154392 0.980126
3 0.059745 0.015562 2.109326e-09 0.036008 0.153087 0.980474
Test
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.086934 0.022077 2.518965e-09 0.039346 0.167302 0.976108
1 0.049421 0.015024 2.893846e-09 0.043431 0.179210 0.973664
2 0.072281 0.016832 2.412594e-09 0.037405 0.163807 0.978050
3 0.049694 0.016724 2.605199e-09 0.037718 0.169998 0.976326
  
Number of gaussian 4
Train
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.019357 0.008800 1.003199e-09 0.024300 0.105563 0.990746
1 0.015887 0.008264 1.020024e-09 0.025192 0.106345 0.990590
2 0.017445 0.008240 1.138369e-09 0.026114 0.112488 0.989517
3 0.017702 0.007519 1.047232e-09 0.024769 0.107884 0.990345
Test
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.027371 0.011668 1.971463e-09 0.033601 0.147935 0.981933
1 0.024902 0.012267 1.973790e-09 0.032675 0.148437 0.981924
2 0.017906 0.012736 1.444267e-09 0.029215 0.126490 0.986681
3 0.025525 0.011085 1.887854e-09 0.034751 0.144647 0.982681
  
Number of gaussian 5
Train
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.014921 0.007936 7.933969e-10 0.021838 0.093845 0.992703
1 0.398523 0.012651 1.019305e-09 0.024792 0.106436 0.990617
2 0.256044 0.012936 1.039521e-09 0.025100 0.107429 0.990399
3 0.016933 0.007382 7.289195e-10 0.020695 0.089983 0.993262
Test
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
0 0.018376 0.006937 9.965393e-10 0.024374 0.105289 0.990762
1 0.195342 0.009877 2.003315e-09 0.033697 0.149001 0.981512
2 0.405507 0.021732 2.010259e-09 0.034235 0.149501 0.981649
3 0.023293 0.015930 1.323150e-09 0.028207 0.121189 0.987927
Wall time: 2min 32s
In [81]:
# Aggregate the per-fold goodness-of-fit tables into a mean and a std
# per number-of-gaussians, for the training folds and the test folds.
scores = {}
for label, cv_all in [('Train', CV_result_train_all),
                      ('Test', CV_result_test_all)]:
    mean_df, std_df = generate_mean_std_gof(cv_all)
    scores[label] = (mean_df, std_df)
    print(label + ' gof mean, std')
    display(mean_df)

# Keep the names that the plotting cell below expects.
train_scores_mean, train_scores_std = scores['Train']
test_scores_mean, test_scores_std = scores['Test']
Train gof mean, std
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
1 0.077126 0.020973 8.401491e-09 0.070969 0.305454 0.922528
2 0.032940 0.011077 2.785532e-09 0.040892 0.175868 0.974315
3 0.054425 0.015500 2.127995e-09 0.035736 0.153716 0.980380
4 0.017598 0.008206 1.052206e-09 0.025094 0.108070 0.990299
5 0.171605 0.010226 8.952857e-10 0.023106 0.099423 0.991745
Test gof mean, std
Chi_square K_S MSE RMSE / Max RMSE / Mean R_square
1 0.081853 0.023567 8.805775e-09 0.071739 0.312509 0.919034
2 0.039189 0.012989 3.248030e-09 0.044029 0.189759 0.970152
3 0.064583 0.017664 2.607651e-09 0.039475 0.170079 0.976037
4 0.023926 0.011939 1.819343e-09 0.032560 0.141877 0.983305
5 0.160629 0.013619 1.583316e-09 0.030128 0.131245 0.985463
In [82]:
prop_cycle=mpl.rcParams['axes.color_cycle']
gaussian_number_range = train_scores_mean.index
for column, column_name in zip(['R_square','K_S','Chi_square'],["$\ R^2$", "K-S", "$\widetilde{\chi^2} $"]):
    plot(gaussian_number_range, train_scores_mean[column],
             '--', label = 'training', color=prop_cycle[0])
    plt.fill_between(gaussian_number_range, 
                     train_scores_mean[column] - train_scores_std[column],
                     train_scores_mean[column] + train_scores_std[column], 
                     alpha=0.2, color=prop_cycle[0])
    
    plot(gaussian_number_range, test_scores_mean[column],
             '-', label = 'test',color=prop_cycle[1])
    plt.fill_between(gaussian_number_range, 
                 test_scores_mean[column] - test_scores_std[column],
                 test_scores_mean[column] + test_scores_std[column], 
                 alpha=0.2,color=prop_cycle[1])
    plt.xticks(gaussian_number_range)
    print(column)
    plt.locator_params(axis='y', nbins=5)
    plt_configure(xlabel='Number of Gaussian Distributions', ylabel=column_name, 
                  figsize=(3,2), legend={'loc':'best'})
    if column == 'R_square':
        plt.gca().set_ylim(top=1)
    if column == 'K_S' or column == 'Chi_square':
        plt.gca().set_ylim(bottom=0)
    plt.show()
D:\ProgramData\Anaconda3\lib\site-packages\matplotlib\__init__.py:938: UserWarning: axes.color_cycle is deprecated and replaced with axes.prop_cycle; please use the latter.
  warnings.warn(self.msg_depr % (key, alt_key))
R_square
K_S
Chi_square
In [83]:
# fig = plt.figure(figsize=(4.3,2.4))
fig = plt.figure(figsize=(5,2.5))
ax1 = fig.add_subplot(1,2,1) 
plot_2d_prob_density(X, Y, kde_Z, ax=ax1,
                     xlabel='x'+speed_unit_text, ylabel='y'+speed_unit_text, colorbar=False)
ax1.grid(False)
ax2 = fig.add_subplot(1,2,2) 
plot_2d_prob_density(X, Y, pdf_Z, ax=ax2,
                     xlabel='x'+speed_unit_text, ylabel='', colorbar=False)
ax2.grid(False)
ax2.get_yaxis().set_visible(False)
In [ ]:
for fig in [fig_hist, fig_kde, fig_em, fig_gmm]:
    display(fig)
for fig in [fig_time_variability_3d, fig_time_variability_cdf, fig_time_variability_weibull, 
            fig_adjecent_variability_3d, fig_adjecent_variability_cdf, fig_adjecent_variability_weibull,]:
    display(fig)
In [ ]:
import time
save_notebook()
time.sleep(3)
location_name = get_location_name(file_path)
print(location_name)
current_file = 'GMM.ipynb'
output_file = './output_HTML/'+location_name+'.html' 

output_HTML(current_file, output_file)